author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 16:04:21 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 16:04:21 +0000
commit    8a754e0858d922e955e71b253c139e071ecec432
tree      527d16e74bfd1840c85efd675fdecad056c54107
parent    Initial commit.
Adding upstream version 2.14.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'test/lib')
-rw-r--r-- test/lib/ansible_test/__init__.py | 2
-rw-r--r-- test/lib/ansible_test/_data/ansible.cfg | 0
-rw-r--r-- test/lib/ansible_test/_data/completion/docker.txt | 9
-rw-r--r-- test/lib/ansible_test/_data/completion/network.txt | 2
-rw-r--r-- test/lib/ansible_test/_data/completion/remote.txt | 16
-rw-r--r-- test/lib/ansible_test/_data/completion/windows.txt | 6
-rw-r--r-- test/lib/ansible_test/_data/coveragerc | 0
-rw-r--r-- test/lib/ansible_test/_data/playbooks/posix_coverage_setup.yml | 21
-rw-r--r-- test/lib/ansible_test/_data/playbooks/posix_coverage_teardown.yml | 8
-rw-r--r-- test/lib/ansible_test/_data/playbooks/posix_hosts_prepare.yml | 9
-rw-r--r-- test/lib/ansible_test/_data/playbooks/posix_hosts_restore.yml | 10
-rw-r--r-- test/lib/ansible_test/_data/playbooks/pypi_proxy_prepare.yml | 23
-rw-r--r-- test/lib/ansible_test/_data/playbooks/pypi_proxy_restore.yml | 12
-rw-r--r-- test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml | 18
-rw-r--r-- test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml | 70
-rw-r--r-- test/lib/ansible_test/_data/playbooks/windows_hosts_prepare.ps1 | 34
-rw-r--r-- test/lib/ansible_test/_data/playbooks/windows_hosts_prepare.yml | 7
-rw-r--r-- test/lib/ansible_test/_data/playbooks/windows_hosts_restore.ps1 | 38
-rw-r--r-- test/lib/ansible_test/_data/playbooks/windows_hosts_restore.yml | 7
-rw-r--r-- test/lib/ansible_test/_data/pytest/config/default.ini | 4
-rw-r--r-- test/lib/ansible_test/_data/pytest/config/legacy.ini | 4
-rw-r--r-- test/lib/ansible_test/_data/requirements/ansible-test.txt | 4
-rw-r--r-- test/lib/ansible_test/_data/requirements/ansible.txt | 15
-rw-r--r-- test/lib/ansible_test/_data/requirements/constraints.txt | 17
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.ansible-doc.in | 3
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt | 6
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.changelog.in | 3
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.changelog.txt | 8
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.import.in | 1
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.import.plugin.in | 2
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.import.plugin.txt | 4
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.import.txt | 2
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.integration-aliases.in | 1
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt | 2
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.mypy.in | 10
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.mypy.txt | 20
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.pep8.in | 1
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.pep8.txt | 2
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.pslint.ps1 | 44
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.pylint.in | 2
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.pylint.txt | 13
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.in | 2
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt | 3
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.validate-modules.in | 3
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt | 5
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.yamllint.in | 1
-rw-r--r-- test/lib/ansible_test/_data/requirements/sanity.yamllint.txt | 4
-rw-r--r-- test/lib/ansible_test/_data/requirements/units.txt | 6
-rw-r--r-- test/lib/ansible_test/_data/requirements/windows-integration.txt | 5
-rw-r--r-- test/lib/ansible_test/_internal/__init__.py | 115
-rw-r--r-- test/lib/ansible_test/_internal/ansible_util.py | 305
-rw-r--r-- test/lib/ansible_test/_internal/become.py | 108
-rw-r--r-- test/lib/ansible_test/_internal/bootstrap.py | 95
-rw-r--r-- test/lib/ansible_test/_internal/cache.py | 31
-rw-r--r-- test/lib/ansible_test/_internal/cgroup.py | 110
-rw-r--r-- test/lib/ansible_test/_internal/ci/__init__.py | 214
-rw-r--r-- test/lib/ansible_test/_internal/ci/azp.py | 262
-rw-r--r-- test/lib/ansible_test/_internal/ci/local.py | 212
-rw-r--r-- test/lib/ansible_test/_internal/classification/__init__.py | 900
-rw-r--r-- test/lib/ansible_test/_internal/classification/common.py | 26
-rw-r--r-- test/lib/ansible_test/_internal/classification/csharp.py | 97
-rw-r--r-- test/lib/ansible_test/_internal/classification/powershell.py | 98
-rw-r--r-- test/lib/ansible_test/_internal/classification/python.py | 341
-rw-r--r-- test/lib/ansible_test/_internal/cli/__init__.py | 63
-rw-r--r-- test/lib/ansible_test/_internal/cli/actions.py | 90
-rw-r--r-- test/lib/ansible_test/_internal/cli/argparsing/__init__.py | 265
-rw-r--r-- test/lib/ansible_test/_internal/cli/argparsing/actions.py | 18
-rw-r--r-- test/lib/ansible_test/_internal/cli/argparsing/argcompletion.py | 124
-rw-r--r-- test/lib/ansible_test/_internal/cli/argparsing/parsers.py | 597
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/__init__.py | 241
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/__init__.py | 85
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/analyze/__init__.py | 28
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/__init__.py | 48
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/combine.py | 49
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/expand.py | 48
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/filter.py | 76
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/generate.py | 49
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/missing.py | 65
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/combine.py | 49
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/erase.py | 36
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/html.py | 43
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/report.py | 61
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/coverage/xml.py | 43
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/env.py | 63
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/integration/__init__.py | 162
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/integration/network.py | 86
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/integration/posix.py | 51
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/integration/windows.py | 51
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/sanity.py | 119
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/shell.py | 59
-rw-r--r-- test/lib/ansible_test/_internal/cli/commands/units.py | 65
-rw-r--r-- test/lib/ansible_test/_internal/cli/compat.py | 505
-rw-r--r-- test/lib/ansible_test/_internal/cli/completers.py | 30
-rw-r--r-- test/lib/ansible_test/_internal/cli/converters.py | 19
-rw-r--r-- test/lib/ansible_test/_internal/cli/environments.py | 612
-rw-r--r-- test/lib/ansible_test/_internal/cli/epilog.py | 23
-rw-r--r-- test/lib/ansible_test/_internal/cli/parsers/__init__.py | 303
-rw-r--r-- test/lib/ansible_test/_internal/cli/parsers/base_argument_parsers.py | 73
-rw-r--r-- test/lib/ansible_test/_internal/cli/parsers/helpers.py | 57
-rw-r--r-- test/lib/ansible_test/_internal/cli/parsers/host_config_parsers.py | 310
-rw-r--r-- test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py | 239
-rw-r--r-- test/lib/ansible_test/_internal/cli/parsers/value_parsers.py | 179
-rw-r--r-- test/lib/ansible_test/_internal/commands/__init__.py | 2
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/__init__.py | 370
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py | 17
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py | 154
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py | 74
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py | 51
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py | 122
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py | 158
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py | 119
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/combine.py | 362
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/erase.py | 43
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/html.py | 51
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/report.py | 152
-rw-r--r-- test/lib/ansible_test/_internal/commands/coverage/xml.py | 189
-rw-r--r-- test/lib/ansible_test/_internal/commands/env/__init__.py | 197
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/__init__.py | 967
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py | 389
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/acme.py | 79
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/aws.py | 131
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/azure.py | 166
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/cloudscale.py | 62
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/cs.py | 174
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/digitalocean.py | 55
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py | 94
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py | 168
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/gcp.py | 55
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py | 106
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py | 92
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/nios.py | 97
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/opennebula.py | 60
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/openshift.py | 114
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/scaleway.py | 56
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py | 138
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/cloud/vultr.py | 55
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/coverage.py | 417
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/filters.py | 279
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/network.py | 77
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/posix.py | 48
-rw-r--r-- test/lib/ansible_test/_internal/commands/integration/windows.py | 81
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/__init__.py | 1173
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py | 127
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/compile.py | 94
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/ignores.py | 84
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/import.py | 217
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/mypy.py | 259
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/pep8.py | 109
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/pslint.py | 119
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/pylint.py | 270
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/sanity_docs.py | 60
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/shellcheck.py | 108
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/validate_modules.py | 190
-rw-r--r-- test/lib/ansible_test/_internal/commands/sanity/yamllint.py | 125
-rw-r--r-- test/lib/ansible_test/_internal/commands/shell/__init__.py | 135
-rw-r--r-- test/lib/ansible_test/_internal/commands/units/__init__.py | 343
-rw-r--r-- test/lib/ansible_test/_internal/compat/__init__.py | 2
-rw-r--r-- test/lib/ansible_test/_internal/compat/packaging.py | 18
-rw-r--r-- test/lib/ansible_test/_internal/compat/yaml.py | 22
-rw-r--r-- test/lib/ansible_test/_internal/completion.py | 310
-rw-r--r-- test/lib/ansible_test/_internal/config.py | 353
-rw-r--r-- test/lib/ansible_test/_internal/connections.py | 258
-rw-r--r-- test/lib/ansible_test/_internal/constants.py | 48
-rw-r--r-- test/lib/ansible_test/_internal/containers.py | 974
-rw-r--r-- test/lib/ansible_test/_internal/content_config.py | 179
-rw-r--r-- test/lib/ansible_test/_internal/core_ci.py | 547
-rw-r--r-- test/lib/ansible_test/_internal/coverage_util.py | 314
-rw-r--r-- test/lib/ansible_test/_internal/data.py | 286
-rw-r--r-- test/lib/ansible_test/_internal/delegation.py | 375
-rw-r--r-- test/lib/ansible_test/_internal/dev/__init__.py | 2
-rw-r--r-- test/lib/ansible_test/_internal/dev/container_probe.py | 210
-rw-r--r-- test/lib/ansible_test/_internal/diff.py | 226
-rw-r--r-- test/lib/ansible_test/_internal/docker_util.py | 1005
-rw-r--r-- test/lib/ansible_test/_internal/encoding.py | 38
-rw-r--r-- test/lib/ansible_test/_internal/executor.py | 115
-rw-r--r-- test/lib/ansible_test/_internal/git.py | 102
-rw-r--r-- test/lib/ansible_test/_internal/host_configs.py | 523
-rw-r--r-- test/lib/ansible_test/_internal/host_profiles.py | 1428
-rw-r--r-- test/lib/ansible_test/_internal/http.py | 134
-rw-r--r-- test/lib/ansible_test/_internal/init.py | 15
-rw-r--r-- test/lib/ansible_test/_internal/inventory.py | 175
-rw-r--r-- test/lib/ansible_test/_internal/io.py | 88
-rw-r--r-- test/lib/ansible_test/_internal/junit_xml.py | 267
-rw-r--r-- test/lib/ansible_test/_internal/locale_util.py | 61
-rw-r--r-- test/lib/ansible_test/_internal/metadata.py | 125
-rw-r--r-- test/lib/ansible_test/_internal/payload.py | 132
-rw-r--r-- test/lib/ansible_test/_internal/provider/__init__.py | 72
-rw-r--r-- test/lib/ansible_test/_internal/provider/layout/__init__.py | 236
-rw-r--r-- test/lib/ansible_test/_internal/provider/layout/ansible.py | 44
-rw-r--r-- test/lib/ansible_test/_internal/provider/layout/collection.py | 126
-rw-r--r-- test/lib/ansible_test/_internal/provider/layout/unsupported.py | 40
-rw-r--r-- test/lib/ansible_test/_internal/provider/source/__init__.py | 15
-rw-r--r-- test/lib/ansible_test/_internal/provider/source/git.py | 69
-rw-r--r-- test/lib/ansible_test/_internal/provider/source/installed.py | 40
-rw-r--r-- test/lib/ansible_test/_internal/provider/source/unsupported.py | 20
-rw-r--r-- test/lib/ansible_test/_internal/provider/source/unversioned.py | 85
-rw-r--r-- test/lib/ansible_test/_internal/provisioning.py | 214
-rw-r--r-- test/lib/ansible_test/_internal/pypi_proxy.py | 180
-rw-r--r-- test/lib/ansible_test/_internal/python_requirements.py | 570
-rw-r--r-- test/lib/ansible_test/_internal/ssh.py | 299
-rw-r--r-- test/lib/ansible_test/_internal/target.py | 707
-rw-r--r-- test/lib/ansible_test/_internal/test.py | 469
-rw-r--r-- test/lib/ansible_test/_internal/thread.py | 82
-rw-r--r-- test/lib/ansible_test/_internal/timeout.py | 93
-rw-r--r-- test/lib/ansible_test/_internal/util.py | 1146
-rw-r--r-- test/lib/ansible_test/_internal/util_common.py | 486
-rw-r--r-- test/lib/ansible_test/_internal/venv.py | 278
-rw-r--r-- test/lib/ansible_test/_util/__init__.py | 2
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.json | 13
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.py | 66
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.json | 8
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.py | 60
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/changelog/sphinx.py | 4
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.json | 14
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.py | 16
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.json | 7
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.py | 46
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.json | 4
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.py | 18
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.json | 7
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.py | 44
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.json | 10
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.py | 24
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.json | 7
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.py | 21
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.json | 7
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.py | 21
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.json | 7
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.py | 21
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.json | 7
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.py | 21
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.json | 7
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.py | 28
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.json | 5
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.py | 83
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.json | 10
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.py | 21
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.json | 5
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.py | 28
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.json | 7
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.py | 21
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.json | 7
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.py | 21
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.json | 11
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.py | 277
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.json | 4
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.py | 124
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.json | 5
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.py | 32
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.json | 10
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.py | 21
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.json | 6
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.py | 21
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/integration-aliases/yaml_to_json.py | 14
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/mypy/ansible-core.ini | 119
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/mypy/ansible-test.ini | 24
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/mypy/modules.ini | 98
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pep8/current-ignore.txt | 4
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pslint/pslint.ps1 | 37
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pslint/settings.psd1 | 52
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test-target.cfg | 57
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg | 63
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pylint/config/code-smell.cfg | 57
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pylint/config/collection.cfg | 147
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pylint/config/default.cfg | 146
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py | 263
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py | 85
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py | 223
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/shellcheck/exclude.txt | 3
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/validate-modules/validate.py | 6
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/__init__.py | 18
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py | 2520
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/module_args.py | 176
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/ps_argspec.ps1 | 121
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/schema.py | 899
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py | 222
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/yamllint/config/default.yml | 19
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/yamllint/config/modules.yml | 19
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/yamllint/config/plugins.yml | 19
-rw-r--r-- test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py | 246
-rw-r--r-- test/lib/ansible_test/_util/controller/tools/collection_detail.py | 94
-rw-r--r-- test/lib/ansible_test/_util/controller/tools/coverage_stub.ps1 | 40
-rw-r--r-- test/lib/ansible_test/_util/controller/tools/sslcheck.py | 22
-rw-r--r-- test/lib/ansible_test/_util/controller/tools/yaml_to_json.py | 27
-rw-r--r-- test/lib/ansible_test/_util/target/__init__.py | 2
-rwxr-xr-x test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py | 45
-rw-r--r-- test/lib/ansible_test/_util/target/common/__init__.py | 2
-rw-r--r-- test/lib/ansible_test/_util/target/common/constants.py | 20
-rw-r--r-- test/lib/ansible_test/_util/target/injector/python.py | 83
-rw-r--r-- test/lib/ansible_test/_util/target/injector/virtualenv.sh | 14
-rw-r--r-- test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_collections.py | 70
-rw-r--r-- test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_coverage.py | 68
-rw-r--r-- test/lib/ansible_test/_util/target/sanity/compile/compile.py | 56
-rw-r--r-- test/lib/ansible_test/_util/target/sanity/import/importer.py | 573
-rw-r--r-- test/lib/ansible_test/_util/target/setup/ConfigureRemotingForAnsible.ps1 | 435
-rw-r--r-- test/lib/ansible_test/_util/target/setup/bootstrap.sh | 450
-rw-r--r-- test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh | 17
-rw-r--r-- test/lib/ansible_test/_util/target/setup/probe_cgroups.py | 31
-rw-r--r-- test/lib/ansible_test/_util/target/setup/quiet_pip.py | 72
-rw-r--r-- test/lib/ansible_test/_util/target/setup/requirements.py | 337
-rw-r--r-- test/lib/ansible_test/_util/target/tools/virtualenvcheck.py | 21
-rw-r--r-- test/lib/ansible_test/_util/target/tools/yamlcheck.py | 20
-rw-r--r-- test/lib/ansible_test/config/cloud-config-aws.ini.template | 26
-rw-r--r-- test/lib/ansible_test/config/cloud-config-azure.ini.template | 30
-rw-r--r-- test/lib/ansible_test/config/cloud-config-cloudscale.ini.template | 9
-rw-r--r-- test/lib/ansible_test/config/cloud-config-cs.ini.template | 18
-rw-r--r-- test/lib/ansible_test/config/cloud-config-gcp.ini.template | 18
-rw-r--r-- test/lib/ansible_test/config/cloud-config-hcloud.ini.template | 15
-rw-r--r-- test/lib/ansible_test/config/cloud-config-opennebula.ini.template | 20
-rw-r--r-- test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template | 12
-rw-r--r-- test/lib/ansible_test/config/cloud-config-scaleway.ini.template | 13
-rw-r--r-- test/lib/ansible_test/config/cloud-config-vcenter.ini.template | 26
-rw-r--r-- test/lib/ansible_test/config/cloud-config-vultr.ini.template | 12
-rw-r--r-- test/lib/ansible_test/config/config.yml | 41
-rw-r--r-- test/lib/ansible_test/config/inventory.networking.template | 42
-rw-r--r-- test/lib/ansible_test/config/inventory.winrm.template | 28
316 files changed, 41199 insertions(+), 0 deletions(-)
diff --git a/test/lib/ansible_test/__init__.py b/test/lib/ansible_test/__init__.py
new file mode 100644
index 0000000..527d413
--- /dev/null
+++ b/test/lib/ansible_test/__init__.py
@@ -0,0 +1,2 @@
+# Empty __init__.py to allow importing of `ansible_test._util.target.common` under Python 2.x.
+# This allows the ansible-test entry point to report supported Python versions before exiting.
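The two comment lines above are the file's entire contents: keeping this __init__.py empty and Python 2-compatible means the package can always be imported, so the entry point can detect an unsupported interpreter and exit with a clear message instead of a SyntaxError. A minimal sketch of that pattern (the version floor and message are illustrative, not the actual stub code):

    # Hypothetical sketch of a version gate that must run before importing
    # any Python 3-only modules. Not the real ansible-test entry point.
    import sys

    if sys.version_info < (3, 9):  # assumed controller minimum; see the real constants
        raise SystemExit(
            'ansible-test requires Python 3.9 or later, found %s.%s'
            % sys.version_info[:2]
        )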
diff --git a/test/lib/ansible_test/_data/ansible.cfg b/test/lib/ansible_test/_data/ansible.cfg
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/lib/ansible_test/_data/ansible.cfg
diff --git a/test/lib/ansible_test/_data/completion/docker.txt b/test/lib/ansible_test/_data/completion/docker.txt
new file mode 100644
index 0000000..9e1a9d5
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/docker.txt
@@ -0,0 +1,9 @@
+base image=quay.io/ansible/base-test-container:3.9.0 python=3.11,2.7,3.5,3.6,3.7,3.8,3.9,3.10
+default image=quay.io/ansible/default-test-container:6.13.0 python=3.11,2.7,3.5,3.6,3.7,3.8,3.9,3.10 context=collection
+default image=quay.io/ansible/ansible-core-test-container:6.13.0 python=3.11,2.7,3.5,3.6,3.7,3.8,3.9,3.10 context=ansible-core
+alpine3 image=quay.io/ansible/alpine3-test-container:4.8.0 python=3.10 cgroup=none audit=none
+centos7 image=quay.io/ansible/centos7-test-container:4.8.0 python=2.7 cgroup=v1-only
+fedora36 image=quay.io/ansible/fedora36-test-container:4.8.0 python=3.10
+opensuse15 image=quay.io/ansible/opensuse15-test-container:4.8.0 python=3.6
+ubuntu2004 image=quay.io/ansible/ubuntu2004-test-container:4.8.0 python=3.8
+ubuntu2204 image=quay.io/ansible/ubuntu2204-test-container:4.8.0 python=3.10
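Each line in these completion files is an entry name followed by space-separated key=value options. A minimal parsing sketch, illustrative only (ansible-test ships its own parser in _internal/completion.py):

    def parse_completion_entry(line: str) -> tuple[str, dict[str, str]]:
        """Split a completion entry into its name and key=value options."""
        name, _, rest = line.partition(' ')
        options = dict(part.split('=', 1) for part in rest.split())
        return name, options

    name, options = parse_completion_entry(
        'ubuntu2204 image=quay.io/ansible/ubuntu2204-test-container:4.8.0 python=3.10')
    assert name == 'ubuntu2204'
    assert options['python'] == '3.10'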
diff --git a/test/lib/ansible_test/_data/completion/network.txt b/test/lib/ansible_test/_data/completion/network.txt
new file mode 100644
index 0000000..1d6b0c1
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/network.txt
@@ -0,0 +1,2 @@
+ios/csr1000v collection=cisco.ios connection=ansible.netcommon.network_cli provider=aws arch=x86_64
+vyos/1.1.8 collection=vyos.vyos connection=ansible.netcommon.network_cli provider=aws arch=x86_64
diff --git a/test/lib/ansible_test/_data/completion/remote.txt b/test/lib/ansible_test/_data/completion/remote.txt
new file mode 100644
index 0000000..192298b
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/remote.txt
@@ -0,0 +1,16 @@
+alpine/3.16 python=3.10 become=doas_sudo provider=aws arch=x86_64
+alpine become=doas_sudo provider=aws arch=x86_64
+fedora/36 python=3.10 become=sudo provider=aws arch=x86_64
+fedora become=sudo provider=aws arch=x86_64
+freebsd/12.3 python=3.8 python_dir=/usr/local/bin become=su_sudo provider=aws arch=x86_64
+freebsd/13.1 python=3.8,3.7,3.9,3.10 python_dir=/usr/local/bin become=su_sudo provider=aws arch=x86_64
+freebsd python_dir=/usr/local/bin become=su_sudo provider=aws arch=x86_64
+macos/12.0 python=3.10 python_dir=/usr/local/bin become=sudo provider=parallels arch=x86_64
+macos python_dir=/usr/local/bin become=sudo provider=parallels arch=x86_64
+rhel/7.9 python=2.7 become=sudo provider=aws arch=x86_64
+rhel/8.6 python=3.6,3.8,3.9 become=sudo provider=aws arch=x86_64
+rhel/9.0 python=3.9 become=sudo provider=aws arch=x86_64
+rhel become=sudo provider=aws arch=x86_64
+ubuntu/20.04 python=3.8,3.9 become=sudo provider=aws arch=x86_64
+ubuntu/22.04 python=3.10 become=sudo provider=aws arch=x86_64
+ubuntu become=sudo provider=aws arch=x86_64
diff --git a/test/lib/ansible_test/_data/completion/windows.txt b/test/lib/ansible_test/_data/completion/windows.txt
new file mode 100644
index 0000000..767c36c
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/windows.txt
@@ -0,0 +1,6 @@
+windows/2012 provider=aws arch=x86_64
+windows/2012-R2 provider=aws arch=x86_64
+windows/2016 provider=aws arch=x86_64
+windows/2019 provider=aws arch=x86_64
+windows/2022 provider=aws arch=x86_64
+windows provider=aws arch=x86_64
diff --git a/test/lib/ansible_test/_data/coveragerc b/test/lib/ansible_test/_data/coveragerc
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/lib/ansible_test/_data/coveragerc
diff --git a/test/lib/ansible_test/_data/playbooks/posix_coverage_setup.yml b/test/lib/ansible_test/_data/playbooks/posix_coverage_setup.yml
new file mode 100644
index 0000000..6ed8682
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/posix_coverage_setup.yml
@@ -0,0 +1,21 @@
+- name: Setup POSIX code coverage configuration
+  hosts: all
+  gather_facts: no
+  tasks:
+    - name: Create coverage temporary directory
+      file:
+        path: "{{ common_temp_dir }}"
+        mode: "{{ mode_directory }}"
+        state: directory
+
+    - name: Create coverage configuration file
+      copy:
+        dest: "{{ coverage_config_path }}"
+        content: "{{ coverage_config }}"
+        mode: "{{ mode_file }}"
+
+    - name: Create coverage output directory
+      file:
+        path: "{{ coverage_output_path }}"
+        mode: "{{ mode_directory_write }}"
+        state: directory
diff --git a/test/lib/ansible_test/_data/playbooks/posix_coverage_teardown.yml b/test/lib/ansible_test/_data/playbooks/posix_coverage_teardown.yml
new file mode 100644
index 0000000..290411b
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/posix_coverage_teardown.yml
@@ -0,0 +1,8 @@
+- name: Teardown POSIX code coverage configuration
+  hosts: all
+  gather_facts: no
+  tasks:
+    - name: Remove coverage temporary directory
+      file:
+        path: "{{ common_temp_dir }}"
+        state: absent
diff --git a/test/lib/ansible_test/_data/playbooks/posix_hosts_prepare.yml b/test/lib/ansible_test/_data/playbooks/posix_hosts_prepare.yml
new file mode 100644
index 0000000..69a0713
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/posix_hosts_prepare.yml
@@ -0,0 +1,9 @@
+- name: Prepare POSIX hosts file
+  hosts: all
+  gather_facts: no
+  tasks:
+    - name: Add container hostname(s) to hosts file
+      blockinfile:
+        path: /etc/hosts
+        block: "{{ '\n'.join(hosts_entries) }}"
+        unsafe_writes: yes
diff --git a/test/lib/ansible_test/_data/playbooks/posix_hosts_restore.yml b/test/lib/ansible_test/_data/playbooks/posix_hosts_restore.yml
new file mode 100644
index 0000000..1549ed6
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/posix_hosts_restore.yml
@@ -0,0 +1,10 @@
+- name: Restore POSIX hosts file
+  hosts: all
+  gather_facts: no
+  tasks:
+    - name: Remove container hostname(s) from hosts file
+      blockinfile:
+        path: /etc/hosts
+        block: "{{ '\n'.join(hosts_entries) }}"
+        unsafe_writes: yes
+        state: absent
diff --git a/test/lib/ansible_test/_data/playbooks/pypi_proxy_prepare.yml b/test/lib/ansible_test/_data/playbooks/pypi_proxy_prepare.yml
new file mode 100644
index 0000000..0f9948c
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/pypi_proxy_prepare.yml
@@ -0,0 +1,23 @@
+- name: Prepare PyPI proxy configuration
+  hosts: all
+  gather_facts: no
+  tasks:
+    - name: Make sure the ~/.pip directory exists
+      file:
+        path: ~/.pip
+        state: directory
+    - name: Configure a custom index for pip based installs
+      copy:
+        content: |
+          [global]
+          index-url = {{ pypi_endpoint }}
+          trusted-host = {{ pypi_hostname }}
+        dest: ~/.pip/pip.conf
+        force: "{{ force }}"
+    - name: Configure a custom index for easy_install based installs
+      copy:
+        content: |
+          [easy_install]
+          index_url = {0}
+        dest: ~/.pydistutils.cfg
+        force: "{{ force }}"
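The first copy task above renders a pip.conf that points pip at the ansible-test PyPI proxy. A rough Python equivalent of what ends up on disk (the endpoint and hostname are placeholders for the pypi_endpoint and pypi_hostname variables):

    # Illustrative only: writes a pip.conf like the one the playbook deploys.
    import configparser
    from pathlib import Path

    config = configparser.ConfigParser()
    config['global'] = {
        'index-url': 'http://pypi-proxy:3141/root/pypi/+simple/',  # placeholder
        'trusted-host': 'pypi-proxy',  # placeholder
    }

    pip_conf = Path.home() / '.pip' / 'pip.conf'
    pip_conf.parent.mkdir(exist_ok=True)
    with pip_conf.open('w') as handle:
        config.write(handle)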
diff --git a/test/lib/ansible_test/_data/playbooks/pypi_proxy_restore.yml b/test/lib/ansible_test/_data/playbooks/pypi_proxy_restore.yml
new file mode 100644
index 0000000..5410fb2
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/pypi_proxy_restore.yml
@@ -0,0 +1,12 @@
+- name: Restore PyPI proxy configuration
+  hosts: all
+  gather_facts: no
+  tasks:
+    - name: Remove custom index for pip based installs
+      file:
+        path: ~/.pip/pip.conf
+        state: absent
+    - name: Remove custom index for easy_install based installs
+      file:
+        path: ~/.pydistutils.cfg
+        state: absent
diff --git a/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml b/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml
new file mode 100644
index 0000000..db7976e
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml
@@ -0,0 +1,18 @@
+- name: Setup Windows code coverage configuration
+  hosts: all
+  gather_facts: no
+  tasks:
+    - name: Create coverage temporary directory
+      ansible.windows.win_file:
+        path: '{{ remote_temp_path }}'
+        state: directory
+
+    - name: Allow everyone to write to the temporary coverage directory
+      ansible.windows.win_acl:
+        path: '{{ remote_temp_path }}'
+        user: Everyone
+        rights: Modify
+        inherit: ContainerInherit, ObjectInherit
+        propagation: 'None'
+        type: allow
+        state: present
diff --git a/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml b/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml
new file mode 100644
index 0000000..f1fa433
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml
@@ -0,0 +1,70 @@
+- name: Teardown Windows code coverage configuration
+  hosts: all
+  gather_facts: no
+  tasks:
+    - name: Zip up all coverage files
+      ansible.windows.win_shell: |
+        $coverage_dir = '{{ remote_temp_path }}'
+        $zip_file = Join-Path -Path $coverage_dir -ChildPath 'coverage.zip'
+        if (Test-Path -LiteralPath $zip_file) {
+            Remove-Item -LiteralPath $zip_file -Force
+        }
+
+        $coverage_files = Get-ChildItem -LiteralPath $coverage_dir -Include '*=coverage*' -File
+
+        $legacy = $false
+        try {
+            # Requires .NET 4.5+ which isn't present on older Windows versions. Remove once 2008/R2 is EOL.
+            # We also can't use the Shell.Application as it will fail on GUI-less servers (Server Core).
+            Add-Type -AssemblyName System.IO.Compression -ErrorAction Stop > $null
+        } catch {
+            $legacy = $true
+        }
+
+        if ($legacy) {
+            New-Item -Path $zip_file -ItemType File > $null
+            $shell = New-Object -ComObject Shell.Application
+            $zip = $shell.Namespace($zip_file)
+            foreach ($file in $coverage_files) {
+                $zip.CopyHere($file.FullName)
+            }
+        } else {
+            $fs = New-Object -TypeName System.IO.FileStream -ArgumentList $zip_file, 'CreateNew'
+            try {
+                $archive = New-Object -TypeName System.IO.Compression.ZipArchive -ArgumentList @(
+                    $fs,
+                    [System.IO.Compression.ZipArchiveMode]::Create
+                )
+                try {
+                    foreach ($file in $coverage_files) {
+                        $archive_entry = $archive.CreateEntry($file.Name, 'Optimal')
+                        $entry_fs = $archive_entry.Open()
+                        try {
+                            $file_fs = [System.IO.File]::OpenRead($file.FullName)
+                            try {
+                                $file_fs.CopyTo($entry_fs)
+                            } finally {
+                                $file_fs.Dispose()
+                            }
+                        } finally {
+                            $entry_fs.Dispose()
+                        }
+                    }
+                } finally {
+                    $archive.Dispose()
+                }
+            } finally {
+                $fs.Dispose()
+            }
+        }
+
+    - name: Fetch coverage zip
+      fetch:
+        src: '{{ remote_temp_path }}\coverage.zip'
+        dest: '{{ local_temp_path }}/{{ inventory_hostname }}.zip'
+        flat: yes
+
+    - name: Remove temporary coverage directory
+      ansible.windows.win_file:
+        path: '{{ remote_temp_path }}'
+        state: absent
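The win_shell block above prefers System.IO.Compression and falls back to the Shell.Application COM object only on hosts without .NET 4.5. For comparison, the same "collect matching coverage files into one archive" step is a few lines of Python with zipfile (illustrative; the controller side of ansible-test processes the fetched zip separately):

    # Illustrative Python equivalent of the archive step in the playbook above.
    import zipfile
    from pathlib import Path

    coverage_dir = Path('/tmp/coverage')  # stand-in for remote_temp_path
    with zipfile.ZipFile(coverage_dir / 'coverage.zip', 'w') as archive:
        for path in coverage_dir.glob('*=coverage*'):
            if path.is_file():
                archive.write(path, arcname=path.name)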
diff --git a/test/lib/ansible_test/_data/playbooks/windows_hosts_prepare.ps1 b/test/lib/ansible_test/_data/playbooks/windows_hosts_prepare.ps1
new file mode 100644
index 0000000..b9e563d
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/windows_hosts_prepare.ps1
@@ -0,0 +1,34 @@
+<#
+.SYNOPSIS
+Add one or more hosts entries to the Windows hosts file.
+
+.PARAMETER Hosts
+A list of hosts entries, delimited by '|'.
+#>
+
+[CmdletBinding()]
+param(
+    [Parameter(Mandatory = $true, Position = 0)][String]$Hosts
+)
+
+$ProgressPreference = "SilentlyContinue"
+$ErrorActionPreference = "Stop"
+
+Write-Verbose -Message "Adding host file entries"
+
+$hosts_entries = $Hosts.Split('|')
+$hosts_file = "$env:SystemRoot\System32\drivers\etc\hosts"
+$hosts_file_lines = [System.IO.File]::ReadAllLines($hosts_file)
+$changed = $false
+
+foreach ($entry in $hosts_entries) {
+    if ($entry -notin $hosts_file_lines) {
+        $hosts_file_lines += $entry
+        $changed = $true
+    }
+}
+
+if ($changed) {
+    Write-Verbose -Message "Host file is missing entries, adding missing entries"
+    [System.IO.File]::WriteAllLines($hosts_file, $hosts_file_lines)
+}
diff --git a/test/lib/ansible_test/_data/playbooks/windows_hosts_prepare.yml b/test/lib/ansible_test/_data/playbooks/windows_hosts_prepare.yml
new file mode 100644
index 0000000..0a23086
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/windows_hosts_prepare.yml
@@ -0,0 +1,7 @@
+- name: Prepare Windows hosts file
+  hosts: all
+  gather_facts: no
+  tasks:
+    - name: Add container hostname(s) to hosts file
+      script:
+        cmd: "\"{{ playbook_dir }}/windows_hosts_prepare.ps1\" -Hosts \"{{ '|'.join(hosts_entries) }}\""
diff --git a/test/lib/ansible_test/_data/playbooks/windows_hosts_restore.ps1 b/test/lib/ansible_test/_data/playbooks/windows_hosts_restore.ps1
new file mode 100644
index 0000000..ac19ffe
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/windows_hosts_restore.ps1
@@ -0,0 +1,38 @@
+<#
+.SYNOPSIS
+Remove one or more hosts entries from the Windows hosts file.
+
+.PARAMETER Hosts
+A list of hosts entries, delimited by '|'.
+#>
+
+[CmdletBinding()]
+param(
+    [Parameter(Mandatory = $true, Position = 0)][String]$Hosts
+)
+
+$ProgressPreference = "SilentlyContinue"
+$ErrorActionPreference = "Stop"
+
+Write-Verbose -Message "Removing host file entries"
+
+$hosts_entries = $Hosts.Split('|')
+$hosts_file = "$env:SystemRoot\System32\drivers\etc\hosts"
+$hosts_file_lines = [System.IO.File]::ReadAllLines($hosts_file)
+$changed = $false
+
+$new_lines = [System.Collections.ArrayList]@()
+
+foreach ($host_line in $hosts_file_lines) {
+    if ($host_line -in $hosts_entries) {
+        $changed = $true
+    }
+    else {
+        $new_lines += $host_line
+    }
+}
+
+if ($changed) {
+    Write-Verbose -Message "Host file has extra entries, removing extra entries"
+    [System.IO.File]::WriteAllLines($hosts_file, $new_lines)
+}
diff --git a/test/lib/ansible_test/_data/playbooks/windows_hosts_restore.yml b/test/lib/ansible_test/_data/playbooks/windows_hosts_restore.yml
new file mode 100644
index 0000000..c595d5f
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/windows_hosts_restore.yml
@@ -0,0 +1,7 @@
+- name: Restore Windows hosts file
+  hosts: all
+  gather_facts: no
+  tasks:
+    - name: Remove container hostname(s) from hosts file
+      script:
+        cmd: "\"{{ playbook_dir }}/windows_hosts_restore.ps1\" -Hosts \"{{ '|'.join(hosts_entries) }}\""
diff --git a/test/lib/ansible_test/_data/pytest/config/default.ini b/test/lib/ansible_test/_data/pytest/config/default.ini
new file mode 100644
index 0000000..60575bf
--- /dev/null
+++ b/test/lib/ansible_test/_data/pytest/config/default.ini
@@ -0,0 +1,4 @@
+[pytest]
+xfail_strict = true
+# avoid using 'mock_use_standalone_module = true' so package maintainers can avoid packaging 'mock'
+junit_family = xunit1
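With xfail_strict = true, a test marked xfail that unexpectedly passes is reported as a failure rather than a harmless XPASS, so stale expected-failure markers get cleaned up. For example:

    # Under xfail_strict = true this test FAILS, because the xfail marker
    # promises a failure and the assertion actually passes.
    import pytest

    @pytest.mark.xfail(reason='expected failure (illustrative)')
    def test_stale_xfail_marker():
        assert 1 + 1 == 2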
diff --git a/test/lib/ansible_test/_data/pytest/config/legacy.ini b/test/lib/ansible_test/_data/pytest/config/legacy.ini
new file mode 100644
index 0000000..b2668dc
--- /dev/null
+++ b/test/lib/ansible_test/_data/pytest/config/legacy.ini
@@ -0,0 +1,4 @@
+[pytest]
+xfail_strict = true
+mock_use_standalone_module = true
+junit_family = xunit1
diff --git a/test/lib/ansible_test/_data/requirements/ansible-test.txt b/test/lib/ansible_test/_data/requirements/ansible-test.txt
new file mode 100644
index 0000000..f7cb9c2
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/ansible-test.txt
@@ -0,0 +1,4 @@
+# The test-constraints sanity test verifies this file, but changes must be made manually to keep it up-to-date.
+virtualenv == 16.7.12 ; python_version < '3'
+coverage == 6.5.0 ; python_version >= '3.7' and python_version <= '3.11'
+coverage == 4.5.4 ; python_version >= '2.6' and python_version <= '3.6'
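The `; python_version < '3'` suffixes above are PEP 508 environment markers, which pip evaluates against the installing interpreter. The packaging library exposes the same evaluation:

    # Evaluating a PEP 508 environment marker the way pip does.
    from packaging.markers import Marker

    marker = Marker("python_version >= '3.7' and python_version <= '3.11'")
    print(marker.evaluate())  # depends on the running interpreter
    print(marker.evaluate({'python_version': '2.7'}))  # False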
diff --git a/test/lib/ansible_test/_data/requirements/ansible.txt b/test/lib/ansible_test/_data/requirements/ansible.txt
new file mode 100644
index 0000000..20562c3
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/ansible.txt
@@ -0,0 +1,15 @@
+# Note: this requirements.txt file is used to specify what dependencies are
+# needed to make the package run rather than for deployment of a tested set of
+# packages. Thus, this should be the loosest set possible (only required
+# packages, not optional ones, and with the widest range of versions that could
+# be suitable)
+jinja2 >= 3.0.0
+PyYAML >= 5.1 # PyYAML 5.1 is required for Python 3.8+ support
+cryptography
+packaging
+# NOTE: resolvelib 0.x version bumps should be considered major/breaking
+# NOTE: and we should update the upper cap with care, at least until 1.0
+# NOTE: Ref: https://github.com/sarugaku/resolvelib/issues/69
+# NOTE: When updating the upper bound, also update the latest version used
+# NOTE: in the ansible-galaxy-collection test suite.
+resolvelib >= 0.5.3, < 0.9.0 # dependency resolver used by ansible-galaxy
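The notes above treat resolvelib 0.x minor bumps as potentially breaking, hence the explicit upper cap. A version specifier like this can be checked against candidate releases with packaging.specifiers:

    # Checking candidate versions against the resolvelib pin above.
    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet('>=0.5.3,<0.9.0')
    print('0.8.1' in spec)  # True: inside the allowed range
    print('0.9.0' in spec)  # False: excluded until vetted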
diff --git a/test/lib/ansible_test/_data/requirements/constraints.txt b/test/lib/ansible_test/_data/requirements/constraints.txt
new file mode 100644
index 0000000..627f41d
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/constraints.txt
@@ -0,0 +1,17 @@
+# do not add a cryptography or pyopenssl constraint to this file, they require special handling, see get_cryptography_requirements in python_requirements.py
+# do not add a coverage constraint to this file, it is handled internally by ansible-test
+packaging < 21.0 ; python_version < '3.6' # packaging 21.0 requires Python 3.6 or newer
+pywinrm >= 0.3.0 ; python_version < '3.11' # message encryption support
+pywinrm >= 0.4.3 ; python_version >= '3.11' # support for Python 3.11
+pytest < 5.0.0, >= 4.5.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7
+pytest >= 4.5.0 ; python_version > '2.7' # pytest 4.5.0 added support for --strict-markers
+pytest-forked >= 1.0.2 # pytest-forked before 1.0.2 does not work with pytest 4.2.0+
+ntlm-auth >= 1.3.0 # message encryption support using cryptography
+requests-ntlm >= 1.1.0 # message encryption support
+requests-credssp >= 0.1.0 # message encryption support
+pyparsing < 3.0.0 ; python_version < '3.5' # pyparsing 3 and later require python 3.5 or later
+mock >= 2.0.0 # needed for features backported from Python 3.6 unittest.mock (assert_called, assert_called_once...)
+pytest-mock >= 1.4.0 # needed for mock_use_standalone_module pytest option
+setuptools < 45 ; python_version == '2.7' # setuptools 45 and later require python 3.5 or later
+pyspnego >= 0.1.6 ; python_version >= '3.10' # bug in older releases breaks on Python 3.10
+wheel < 0.38.0 ; python_version < '3.7' # wheel 0.38.0 and later require python 3.7 or later
diff --git a/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.in b/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.in
new file mode 100644
index 0000000..80c769f
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.in
@@ -0,0 +1,3 @@
+jinja2 # ansible-core requirement
+packaging # ansible-core requirement
+pyyaml # ansible-core requirement
diff --git a/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt b/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt
new file mode 100644
index 0000000..59fa870
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt
@@ -0,0 +1,6 @@
+# edit "sanity.ansible-doc.in" and generate with: hacking/update-sanity-requirements.py --test ansible-doc
+Jinja2==3.1.2
+MarkupSafe==2.1.1
+packaging==21.3
+pyparsing==3.0.9
+PyYAML==6.0
diff --git a/test/lib/ansible_test/_data/requirements/sanity.changelog.in b/test/lib/ansible_test/_data/requirements/sanity.changelog.in
new file mode 100644
index 0000000..7f23182
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.changelog.in
@@ -0,0 +1,3 @@
+rstcheck < 4 # match version used in other sanity tests
+antsibull-changelog
+docutils < 0.18 # match version required by sphinx in the docs-build sanity test
diff --git a/test/lib/ansible_test/_data/requirements/sanity.changelog.txt b/test/lib/ansible_test/_data/requirements/sanity.changelog.txt
new file mode 100644
index 0000000..1b2b252
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.changelog.txt
@@ -0,0 +1,8 @@
+# edit "sanity.changelog.in" and generate with: hacking/update-sanity-requirements.py --test changelog
+antsibull-changelog==0.16.0
+docutils==0.17.1
+packaging==21.3
+pyparsing==3.0.9
+PyYAML==6.0
+rstcheck==3.5.0
+semantic-version==2.10.0
diff --git a/test/lib/ansible_test/_data/requirements/sanity.import.in b/test/lib/ansible_test/_data/requirements/sanity.import.in
new file mode 100644
index 0000000..dea704e
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.import.in
@@ -0,0 +1 @@
+pyyaml # needed for yaml_to_json.py
diff --git a/test/lib/ansible_test/_data/requirements/sanity.import.plugin.in b/test/lib/ansible_test/_data/requirements/sanity.import.plugin.in
new file mode 100644
index 0000000..cec0eed
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.import.plugin.in
@@ -0,0 +1,2 @@
+jinja2 # ansible-core requirement
+pyyaml # ansible-core requirement
diff --git a/test/lib/ansible_test/_data/requirements/sanity.import.plugin.txt b/test/lib/ansible_test/_data/requirements/sanity.import.plugin.txt
new file mode 100644
index 0000000..ef7b006
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.import.plugin.txt
@@ -0,0 +1,4 @@
+# edit "sanity.import.plugin.in" and generate with: hacking/update-sanity-requirements.py --test import.plugin
+Jinja2==3.1.2
+MarkupSafe==2.1.1
+PyYAML==6.0
diff --git a/test/lib/ansible_test/_data/requirements/sanity.import.txt b/test/lib/ansible_test/_data/requirements/sanity.import.txt
new file mode 100644
index 0000000..e9645ea
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.import.txt
@@ -0,0 +1,2 @@
+# edit "sanity.import.in" and generate with: hacking/update-sanity-requirements.py --test import
+PyYAML==6.0
diff --git a/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.in b/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.in
new file mode 100644
index 0000000..c3726e8
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.in
@@ -0,0 +1 @@
+pyyaml
diff --git a/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt b/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt
new file mode 100644
index 0000000..ba3a502
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt
@@ -0,0 +1,2 @@
+# edit "sanity.integration-aliases.in" and generate with: hacking/update-sanity-requirements.py --test integration-aliases
+PyYAML==6.0
diff --git a/test/lib/ansible_test/_data/requirements/sanity.mypy.in b/test/lib/ansible_test/_data/requirements/sanity.mypy.in
new file mode 100644
index 0000000..98dead6
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.mypy.in
@@ -0,0 +1,10 @@
+mypy[python2] != 0.971 # regression in 0.971 (see https://github.com/python/mypy/pull/13223)
+packaging # type stubs not published separately
+types-backports
+types-jinja2
+types-paramiko < 2.8.14 # newer versions drop support for Python 2.7
+types-pyyaml < 6 # PyYAML 6+ stubs do not support Python 2.7
+types-cryptography < 3.3.16 # newer versions drop support for Python 2.7
+types-requests
+types-setuptools
+types-toml
diff --git a/test/lib/ansible_test/_data/requirements/sanity.mypy.txt b/test/lib/ansible_test/_data/requirements/sanity.mypy.txt
new file mode 100644
index 0000000..9dffc8f
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.mypy.txt
@@ -0,0 +1,20 @@
+# edit "sanity.mypy.in" and generate with: hacking/update-sanity-requirements.py --test mypy
+mypy==0.961
+mypy-extensions==0.4.3
+packaging==21.3
+pyparsing==3.0.9
+tomli==2.0.1
+typed-ast==1.5.4
+types-backports==0.1.3
+types-cryptography==3.3.15
+types-enum34==1.1.8
+types-ipaddress==1.0.8
+types-Jinja2==2.11.9
+types-MarkupSafe==1.1.10
+types-paramiko==2.8.13
+types-PyYAML==5.4.12
+types-requests==2.28.10
+types-setuptools==65.3.0
+types-toml==0.10.8
+types-urllib3==1.26.24
+typing_extensions==4.3.0
diff --git a/test/lib/ansible_test/_data/requirements/sanity.pep8.in b/test/lib/ansible_test/_data/requirements/sanity.pep8.in
new file mode 100644
index 0000000..282a93f
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.pep8.in
@@ -0,0 +1 @@
+pycodestyle
diff --git a/test/lib/ansible_test/_data/requirements/sanity.pep8.txt b/test/lib/ansible_test/_data/requirements/sanity.pep8.txt
new file mode 100644
index 0000000..60d5784
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.pep8.txt
@@ -0,0 +1,2 @@
+# edit "sanity.pep8.in" and generate with: hacking/update-sanity-requirements.py --test pep8
+pycodestyle==2.9.1
diff --git a/test/lib/ansible_test/_data/requirements/sanity.pslint.ps1 b/test/lib/ansible_test/_data/requirements/sanity.pslint.ps1
new file mode 100644
index 0000000..68545c9
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.pslint.ps1
@@ -0,0 +1,44 @@
+param (
+    [Switch]
+    $IsContainer
+)
+
+#Requires -Version 7
+
+Set-StrictMode -Version 2.0
+$ErrorActionPreference = "Stop"
+$ProgressPreference = 'SilentlyContinue'
+
+Function Install-PSModule {
+    [CmdletBinding()]
+    param(
+        [Parameter(Mandatory = $true)]
+        [String]
+        $Name,
+
+        [Parameter(Mandatory = $true)]
+        [Version]
+        $RequiredVersion
+    )
+
+    # In case PSGallery is down we check if the module is already installed.
+    $installedModule = Get-Module -Name $Name -ListAvailable | Where-Object Version -eq $RequiredVersion
+    if (-not $installedModule) {
+        Install-Module -Name $Name -RequiredVersion $RequiredVersion -Scope CurrentUser
+    }
+}
+
+Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
+Install-PSModule -Name PSScriptAnalyzer -RequiredVersion 1.20.0
+
+if ($IsContainer) {
+    # PSScriptAnalyzer contains lots of json files for the UseCompatibleCommands check. We don't use this rule so by
+    # removing the contents we can save 200MB in the docker image (or more in the future).
+    # https://github.com/PowerShell/PSScriptAnalyzer/blob/master/docs/Rules/UseCompatibleCommands.md
+    $pssaPath = (Get-Module -ListAvailable -Name PSScriptAnalyzer).ModuleBase
+    $compatPath = Join-Path -Path $pssaPath -ChildPath compatibility_profiles -AdditionalChildPath '*'
+    Remove-Item -Path $compatPath -Recurse -Force
+}
+
+# Install the PSCustomUseLiteralPath rule
+Install-PSModule -Name PSSA-PSCustomUseLiteralPath -RequiredVersion 0.1.1
diff --git a/test/lib/ansible_test/_data/requirements/sanity.pylint.in b/test/lib/ansible_test/_data/requirements/sanity.pylint.in
new file mode 100644
index 0000000..fde21f1
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.pylint.in
@@ -0,0 +1,2 @@
+pylint == 2.15.5 # currently vetted version
+pyyaml # needed for collection_detail.py
diff --git a/test/lib/ansible_test/_data/requirements/sanity.pylint.txt b/test/lib/ansible_test/_data/requirements/sanity.pylint.txt
new file mode 100644
index 0000000..a1c6a6a
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.pylint.txt
@@ -0,0 +1,13 @@
+# edit "sanity.pylint.in" and generate with: hacking/update-sanity-requirements.py --test pylint
+astroid==2.12.12
+dill==0.3.6
+isort==5.10.1
+lazy-object-proxy==1.7.1
+mccabe==0.7.0
+platformdirs==2.5.2
+pylint==2.15.5
+PyYAML==6.0
+tomli==2.0.1
+tomlkit==0.11.5
+typing_extensions==4.3.0
+wrapt==1.14.1
diff --git a/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.in b/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.in
new file mode 100644
index 0000000..edd9699
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.in
@@ -0,0 +1,2 @@
+pyyaml
+voluptuous
diff --git a/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt b/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt
new file mode 100644
index 0000000..3953b77
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt
@@ -0,0 +1,3 @@
+# edit "sanity.runtime-metadata.in" and generate with: hacking/update-sanity-requirements.py --test runtime-metadata
+PyYAML==6.0
+voluptuous==0.13.1
diff --git a/test/lib/ansible_test/_data/requirements/sanity.validate-modules.in b/test/lib/ansible_test/_data/requirements/sanity.validate-modules.in
new file mode 100644
index 0000000..efe9400
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.validate-modules.in
@@ -0,0 +1,3 @@
+jinja2 # ansible-core requirement
+pyyaml # needed for collection_detail.py
+voluptuous
diff --git a/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt b/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt
new file mode 100644
index 0000000..e737f90
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt
@@ -0,0 +1,5 @@
+# edit "sanity.validate-modules.in" and generate with: hacking/update-sanity-requirements.py --test validate-modules
+Jinja2==3.1.2
+MarkupSafe==2.1.1
+PyYAML==6.0
+voluptuous==0.13.1
diff --git a/test/lib/ansible_test/_data/requirements/sanity.yamllint.in b/test/lib/ansible_test/_data/requirements/sanity.yamllint.in
new file mode 100644
index 0000000..b2c729c
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.yamllint.in
@@ -0,0 +1 @@
+yamllint
diff --git a/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt b/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt
new file mode 100644
index 0000000..fd013b5
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt
@@ -0,0 +1,4 @@
+# edit "sanity.yamllint.in" and generate with: hacking/update-sanity-requirements.py --test yamllint
+pathspec==0.10.1
+PyYAML==6.0
+yamllint==1.28.0
diff --git a/test/lib/ansible_test/_data/requirements/units.txt b/test/lib/ansible_test/_data/requirements/units.txt
new file mode 100644
index 0000000..d2f56d3
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/units.txt
@@ -0,0 +1,6 @@
+mock
+pytest
+pytest-mock
+pytest-xdist
+pytest-forked
+pyyaml # required by the collection loader (only needed for collections)
diff --git a/test/lib/ansible_test/_data/requirements/windows-integration.txt b/test/lib/ansible_test/_data/requirements/windows-integration.txt
new file mode 100644
index 0000000..b3554de
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/windows-integration.txt
@@ -0,0 +1,5 @@
+ntlm-auth
+requests-ntlm
+requests-credssp
+pypsrp
+pywinrm[credssp]
diff --git a/test/lib/ansible_test/_internal/__init__.py b/test/lib/ansible_test/_internal/__init__.py
new file mode 100644
index 0000000..d218b56
--- /dev/null
+++ b/test/lib/ansible_test/_internal/__init__.py
@@ -0,0 +1,115 @@
+"""Test runner for all Ansible tests."""
+from __future__ import annotations
+
+import os
+import sys
+import typing as t
+
+# This import should occur as early as possible.
+# It must occur before subprocess has been imported anywhere in the current process.
+from .init import (
+ CURRENT_RLIMIT_NOFILE,
+)
+
+from .constants import (
+ STATUS_HOST_CONNECTION_ERROR,
+)
+
+from .util import (
+ ApplicationError,
+ HostConnectionError,
+ display,
+ report_locale,
+)
+
+from .delegation import (
+ delegate,
+)
+
+from .executor import (
+ ApplicationWarning,
+ Delegate,
+ ListTargets,
+)
+
+from .timeout import (
+ configure_timeout,
+)
+
+from .data import (
+ data_context,
+)
+
+from .util_common import (
+ CommonConfig,
+)
+
+from .cli import (
+ parse_args,
+)
+
+from .provisioning import (
+ PrimeContainers,
+)
+
+from .config import (
+ TestConfig,
+)
+
+
+def main(cli_args: t.Optional[list[str]] = None) -> None:
+ """Main program function."""
+ try:
+ os.chdir(data_context().content.root)
+ args = parse_args(cli_args)
+ config: CommonConfig = args.config(args)
+ display.verbosity = config.verbosity
+ display.truncate = config.truncate
+ display.redact = config.redact
+ display.color = config.color
+ display.fd = sys.stderr if config.display_stderr else sys.stdout
+ configure_timeout(config)
+ report_locale(isinstance(config, TestConfig) and not config.delegate)
+
+ display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
+
+ delegate_args = None
+ target_names = None
+
+ try:
+ if config.check_layout:
+ data_context().check_layout()
+
+ args.func(config)
+ except PrimeContainers:
+ pass
+ except ListTargets as ex:
+ # save target_names for use once we exit the exception handler
+ target_names = ex.target_names
+ except Delegate as ex:
+ # save delegation args for use once we exit the exception handler
+ delegate_args = (ex.host_state, ex.exclude, ex.require)
+
+ if delegate_args:
+ delegate(config, *delegate_args)
+
+ if target_names:
+ for target_name in target_names:
+ print(target_name) # display goes to stderr, this should be on stdout
+
+ display.review_warnings()
+ config.success = True
+ except HostConnectionError as ex:
+ display.fatal(str(ex))
+ ex.run_callback()
+ sys.exit(STATUS_HOST_CONNECTION_ERROR)
+ except ApplicationWarning as ex:
+ display.warning('%s' % ex)
+ sys.exit(0)
+ except ApplicationError as ex:
+ display.fatal('%s' % ex)
+ sys.exit(1)
+ except KeyboardInterrupt:
+ sys.exit(2)
+ except BrokenPipeError:
+ sys.exit(3)
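
The main() function above uses exceptions as control flow: command functions raise Delegate or ListTargets, main() captures their payloads, and the real work happens only after the exception handler has been exited. A minimal sketch of that pattern, using hypothetical stand-ins rather than the real ansible-test types:

    class Delegate(Exception):
        """Raised by a command that must be re-run in another environment."""
        def __init__(self, host_state, exclude, require):
            self.host_state = host_state
            self.exclude = exclude
            self.require = require

    def run(command):
        delegate_args = None

        try:
            command()
        except Delegate as ex:
            # only capture the payload here; doing the delegation inside the
            # handler would keep the exception context alive for its whole duration
            delegate_args = (ex.host_state, ex.exclude, ex.require)

        if delegate_args:
            print('delegating with:', delegate_args)

    def command():
        raise Delegate(host_state='host-state', exclude=[], require=[])

    run(command)
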
diff --git a/test/lib/ansible_test/_internal/ansible_util.py b/test/lib/ansible_test/_internal/ansible_util.py
new file mode 100644
index 0000000..9efcda2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ansible_util.py
@@ -0,0 +1,305 @@
+"""Miscellaneous utility functions and classes specific to ansible cli tools."""
+from __future__ import annotations
+
+import json
+import os
+import typing as t
+
+from .constants import (
+ SOFT_RLIMIT_NOFILE,
+)
+
+from .io import (
+ write_text_file,
+)
+
+from .util import (
+ common_environment,
+ ApplicationError,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_SOURCE_ROOT,
+ ANSIBLE_TEST_TOOLS_ROOT,
+ get_ansible_version,
+ raw_command,
+)
+
+from .util_common import (
+ create_temp_dir,
+ ResultType,
+ intercept_python,
+ get_injector_path,
+)
+
+from .config import (
+ IntegrationConfig,
+ PosixIntegrationConfig,
+ EnvironmentConfig,
+ CommonConfig,
+)
+
+from .data import (
+ data_context,
+)
+
+from .python_requirements import (
+ install_requirements,
+)
+
+from .host_configs import (
+ PythonConfig,
+)
+
+from .thread import (
+ mutex,
+)
+
+
+def parse_inventory(args: EnvironmentConfig, inventory_path: str) -> dict[str, t.Any]:
+ """Return a dict parsed from the given inventory file."""
+ cmd = ['ansible-inventory', '-i', inventory_path, '--list']
+ env = ansible_environment(args)
+ inventory = json.loads(intercept_python(args, args.controller_python, cmd, env, capture=True, always=True)[0])
+ return inventory
+
+
+def get_hosts(inventory: dict[str, t.Any], group_name: str) -> dict[str, dict[str, t.Any]]:
+ """Return a dict of hosts from the specified group in the given inventory."""
+ hostvars = inventory.get('_meta', {}).get('hostvars', {})
+ group = inventory.get(group_name, {})
+ host_names = group.get('hosts', [])
+ hosts = dict((name, hostvars.get(name, {})) for name in host_names)
+ return hosts
+
+
+def ansible_environment(args: CommonConfig, color: bool = True, ansible_config: t.Optional[str] = None) -> dict[str, str]:
+ """Return a dictionary of environment variables to use when running Ansible commands."""
+ env = common_environment()
+ path = env['PATH']
+
+ if not path.startswith(ANSIBLE_BIN_PATH + os.path.pathsep):
+ path = ANSIBLE_BIN_PATH + os.path.pathsep + path
+
+ if not ansible_config:
+ # use the default empty configuration unless one has been provided
+ ansible_config = args.get_ansible_config()
+
+ if not args.explain and not os.path.exists(ansible_config):
+ raise ApplicationError('Configuration not found: %s' % ansible_config)
+
+ ansible = dict(
+ ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE=str(SOFT_RLIMIT_NOFILE),
+        ANSIBLE_FORCE_COLOR='true' if args.color and color else 'false',
+ ANSIBLE_FORCE_HANDLERS='true', # allow cleanup handlers to run when tests fail
+ ANSIBLE_HOST_PATTERN_MISMATCH='error', # prevent tests from unintentionally passing when hosts are not found
+ ANSIBLE_INVENTORY='/dev/null', # force tests to provide inventory
+ ANSIBLE_DEPRECATION_WARNINGS='false',
+ ANSIBLE_HOST_KEY_CHECKING='false',
+ ANSIBLE_RETRY_FILES_ENABLED='false',
+ ANSIBLE_CONFIG=ansible_config,
+ ANSIBLE_LIBRARY='/dev/null',
+ ANSIBLE_DEVEL_WARNING='false', # Don't show warnings that CI is running devel
+ PYTHONPATH=get_ansible_python_path(args),
+ PAGER='/bin/cat',
+ PATH=path,
+ # give TQM worker processes time to report code coverage results
+ # without this the last task in a play may write no coverage file, an empty file, or an incomplete file
+ # enabled even when not using code coverage to surface warnings when worker processes do not exit cleanly
+ ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT='100',
+ ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY='0.1',
+ )
+
+ if isinstance(args, IntegrationConfig) and args.coverage:
+ # standard path injection is not effective for ansible-connection, instead the location must be configured
+ # ansible-connection only requires the injector for code coverage
+ # the correct python interpreter is already selected using the sys.executable used to invoke ansible
+ ansible.update(dict(
+ ANSIBLE_CONNECTION_PATH=os.path.join(get_injector_path(), 'ansible-connection'),
+ ))
+
+ if isinstance(args, PosixIntegrationConfig):
+ ansible.update(dict(
+ ANSIBLE_PYTHON_INTERPRETER='/set/ansible_python_interpreter/in/inventory', # force tests to set ansible_python_interpreter in inventory
+ ))
+
+ env.update(ansible)
+
+ if args.debug:
+ env.update(dict(
+ ANSIBLE_DEBUG='true',
+ ANSIBLE_LOG_PATH=os.path.join(ResultType.LOGS.name, 'debug.log'),
+ ))
+
+ if data_context().content.collection:
+ env.update(dict(
+ ANSIBLE_COLLECTIONS_PATH=data_context().content.collection.root,
+ ))
+
+ if data_context().content.is_ansible:
+ env.update(configure_plugin_paths(args))
+
+ return env
+
+
+def configure_plugin_paths(args: CommonConfig) -> dict[str, str]:
+ """Return environment variables with paths to plugins relevant for the current command."""
+ if not isinstance(args, IntegrationConfig):
+ return {}
+
+ support_path = os.path.join(ANSIBLE_SOURCE_ROOT, 'test', 'support', args.command)
+
+ # provide private copies of collections for integration tests
+ collection_root = os.path.join(support_path, 'collections')
+
+ env = dict(
+ ANSIBLE_COLLECTIONS_PATH=collection_root,
+ )
+
+ # provide private copies of plugins for integration tests
+ plugin_root = os.path.join(support_path, 'plugins')
+
+ plugin_list = [
+ 'action',
+ 'become',
+ 'cache',
+ 'callback',
+ 'cliconf',
+ 'connection',
+ 'filter',
+ 'httpapi',
+ 'inventory',
+ 'lookup',
+ 'netconf',
+ # 'shell' is not configurable
+ 'strategy',
+ 'terminal',
+ 'test',
+ 'vars',
+ ]
+
+ # most plugins follow a standard naming convention
+ plugin_map = dict(('%s_plugins' % name, name) for name in plugin_list)
+
+ # these plugins do not follow the standard naming convention
+ plugin_map.update(
+ doc_fragment='doc_fragments',
+ library='modules',
+ module_utils='module_utils',
+ )
+
+ env.update(dict(('ANSIBLE_%s' % key.upper(), os.path.join(plugin_root, value)) for key, value in plugin_map.items()))
+
+ # only configure directories which exist
+ env = dict((key, value) for key, value in env.items() if os.path.isdir(value))
+
+ return env
+
+
+@mutex
+def get_ansible_python_path(args: CommonConfig) -> str:
+ """
+ Return a directory usable for PYTHONPATH, containing only the ansible package.
+ If a temporary directory is required, it will be cached for the lifetime of the process and cleaned up at exit.
+ """
+ try:
+ return get_ansible_python_path.python_path # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+
+ if ANSIBLE_SOURCE_ROOT:
+ # when running from source there is no need for a temporary directory to isolate the ansible package
+ python_path = os.path.dirname(ANSIBLE_LIB_ROOT)
+ else:
+ # when not running from source the installed directory is unsafe to add to PYTHONPATH
+ # doing so would expose many unwanted packages on sys.path
+ # instead a temporary directory is created which contains only ansible using a symlink
+ python_path = create_temp_dir(prefix='ansible-test-')
+
+ os.symlink(ANSIBLE_LIB_ROOT, os.path.join(python_path, 'ansible'))
+
+ if not args.explain:
+ generate_egg_info(python_path)
+
+ get_ansible_python_path.python_path = python_path # type: ignore[attr-defined]
+
+ return python_path
+
+
+def generate_egg_info(path: str) -> None:
+ """Generate an egg-info in the specified base directory."""
+ # minimal PKG-INFO stub following the format defined in PEP 241
+ # required for older setuptools versions to avoid a traceback when importing pkg_resources from packages like cryptography
+ # newer setuptools versions are happy with an empty directory
+ # including a stub here means we don't need to locate the existing file or have setup.py generate it when running from source
+ pkg_info = '''
+Metadata-Version: 1.0
+Name: ansible
+Version: %s
+Platform: UNKNOWN
+Summary: Radically simple IT automation
+Author-email: info@ansible.com
+License: GPLv3+
+''' % get_ansible_version()
+
+ pkg_info_path = os.path.join(path, 'ansible_core.egg-info', 'PKG-INFO')
+
+ if os.path.exists(pkg_info_path):
+ return
+
+ write_text_file(pkg_info_path, pkg_info.lstrip(), create_directories=True)
+
+
+class CollectionDetail:
+ """Collection detail."""
+ def __init__(self) -> None:
+ self.version: t.Optional[str] = None
+
+
+class CollectionDetailError(ApplicationError):
+ """An error occurred retrieving collection detail."""
+ def __init__(self, reason: str) -> None:
+ super().__init__('Error collecting collection detail: %s' % reason)
+ self.reason = reason
+
+
+def get_collection_detail(python: PythonConfig) -> CollectionDetail:
+ """Return collection detail."""
+ collection = data_context().content.collection
+ directory = os.path.join(collection.root, collection.directory)
+
+ stdout = raw_command([python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'collection_detail.py'), directory], capture=True)[0]
+ result = json.loads(stdout)
+ error = result.get('error')
+
+ if error:
+ raise CollectionDetailError(error)
+
+ version = result.get('version')
+
+ detail = CollectionDetail()
+ detail.version = str(version) if version is not None else None
+
+ return detail
+
+
+def run_playbook(
+ args: EnvironmentConfig,
+ inventory_path: str,
+ playbook: str,
+ capture: bool,
+ variables: t.Optional[dict[str, t.Any]] = None,
+) -> None:
+ """Run the specified playbook using the given inventory file and playbook variables."""
+ playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook)
+ cmd = ['ansible-playbook', '-i', inventory_path, playbook_path]
+
+ if variables:
+ cmd.extend(['-e', json.dumps(variables)])
+
+ if args.verbosity:
+ cmd.append('-%s' % ('v' * args.verbosity))
+
+ install_requirements(args, args.controller_python, ansible=True) # run_playbook()
+ env = ansible_environment(args)
+ intercept_python(args, args.controller_python, cmd, env, capture=capture)
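
parse_inventory() shells out to `ansible-inventory --list` and get_hosts() then joins a group's host names with their hostvars from the `_meta` section. The JSON shape involved, with a hand-written sample (not captured from a real run) and a stand-alone copy of the get_hosts() logic:

    inventory = {
        '_meta': {'hostvars': {'web1': {'ansible_host': '10.0.0.5'}}},
        'webservers': {'hosts': ['web1']},
    }

    def get_hosts(inventory, group_name):
        # same logic as the helper above, usable without ansible-test
        hostvars = inventory.get('_meta', {}).get('hostvars', {})
        host_names = inventory.get(group_name, {}).get('hosts', [])
        return {name: hostvars.get(name, {}) for name in host_names}

    assert get_hosts(inventory, 'webservers') == {'web1': {'ansible_host': '10.0.0.5'}}
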
diff --git a/test/lib/ansible_test/_internal/become.py b/test/lib/ansible_test/_internal/become.py
new file mode 100644
index 0000000..e653959
--- /dev/null
+++ b/test/lib/ansible_test/_internal/become.py
@@ -0,0 +1,108 @@
+"""Become abstraction for interacting with test hosts."""
+from __future__ import annotations
+
+import abc
+import shlex
+
+from .util import (
+ get_subclasses,
+)
+
+
+class Become(metaclass=abc.ABCMeta):
+ """Base class for become implementations."""
+ @classmethod
+ def name(cls) -> str:
+ """The name of this plugin."""
+ return cls.__name__.lower()
+
+ @property
+ @abc.abstractmethod
+ def method(self) -> str:
+ """The name of the Ansible become plugin that is equivalent to this."""
+
+ @abc.abstractmethod
+ def prepare_command(self, command: list[str]) -> list[str]:
+ """Return the given command, if any, with privilege escalation."""
+
+
+class Doas(Become):
+ """Become using 'doas'."""
+ @property
+ def method(self) -> str:
+ """The name of the Ansible become plugin that is equivalent to this."""
+ raise NotImplementedError('Ansible has no built-in doas become plugin.')
+
+ def prepare_command(self, command: list[str]) -> list[str]:
+ """Return the given command, if any, with privilege escalation."""
+ become = ['doas', '-n']
+
+ if command:
+ become.extend(['sh', '-c', shlex.join(command)])
+ else:
+ become.extend(['-s'])
+
+ return become
+
+
+class DoasSudo(Doas):
+ """Become using 'doas' in ansible-test and then after bootstrapping use 'sudo' for other ansible commands."""
+ @classmethod
+ def name(cls) -> str:
+ """The name of this plugin."""
+ return 'doas_sudo'
+
+ @property
+ def method(self) -> str:
+ """The name of the Ansible become plugin that is equivalent to this."""
+ return 'sudo'
+
+
+class Su(Become):
+ """Become using 'su'."""
+ @property
+ def method(self) -> str:
+ """The name of the Ansible become plugin that is equivalent to this."""
+ return 'su'
+
+ def prepare_command(self, command: list[str]) -> list[str]:
+ """Return the given command, if any, with privilege escalation."""
+ become = ['su', '-l', 'root']
+
+ if command:
+ become.extend(['-c', shlex.join(command)])
+
+ return become
+
+
+class SuSudo(Su):
+ """Become using 'su' in ansible-test and then after bootstrapping use 'sudo' for other ansible commands."""
+ @classmethod
+ def name(cls) -> str:
+ """The name of this plugin."""
+ return 'su_sudo'
+
+ @property
+ def method(self) -> str:
+ """The name of the Ansible become plugin that is equivalent to this."""
+ return 'sudo'
+
+
+class Sudo(Become):
+ """Become using 'sudo'."""
+ @property
+ def method(self) -> str:
+ """The name of the Ansible become plugin that is equivalent to this."""
+ return 'sudo'
+
+ def prepare_command(self, command: list[str]) -> list[str]:
+ """Return the given command, if any, with privilege escalation."""
+ become = ['sudo', '-in']
+
+ if command:
+ become.extend(['sh', '-c', shlex.join(command)])
+
+ return become
+
+
+SUPPORTED_BECOME_METHODS = {cls.name(): cls for cls in get_subclasses(Become)}
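
SUPPORTED_BECOME_METHODS is a name-to-class registry built from get_subclasses(), so defining a new Become subclass is enough to register it. Assuming the module above is importable as `become`, usage looks like this (the expected values are derived by reading prepare_command(), not from a real run):

    from become import SUPPORTED_BECOME_METHODS

    sudo = SUPPORTED_BECOME_METHODS['sudo']()

    assert sudo.method == 'sudo'
    assert sudo.prepare_command(['ls', '-la']) == ['sudo', '-in', 'sh', '-c', 'ls -la']
    assert sudo.prepare_command([]) == ['sudo', '-in']  # no command: interactive shell
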
diff --git a/test/lib/ansible_test/_internal/bootstrap.py b/test/lib/ansible_test/_internal/bootstrap.py
new file mode 100644
index 0000000..261ef59
--- /dev/null
+++ b/test/lib/ansible_test/_internal/bootstrap.py
@@ -0,0 +1,95 @@
+"""Bootstrapping for test hosts."""
+from __future__ import annotations
+
+import dataclasses
+import os
+import typing as t
+
+from .io import (
+ read_text_file,
+)
+
+from .util import (
+ ANSIBLE_TEST_TARGET_ROOT,
+)
+
+from .util_common import (
+ ShellScriptTemplate,
+ set_shebang,
+)
+
+from .core_ci import (
+ SshKey,
+)
+
+
+@dataclasses.dataclass
+class Bootstrap:
+ """Base class for bootstrapping systems."""
+ controller: bool
+ python_versions: list[str]
+ ssh_key: SshKey
+
+ @property
+ def bootstrap_type(self) -> str:
+ """The bootstrap type to pass to the bootstrapping script."""
+ return self.__class__.__name__.replace('Bootstrap', '').lower()
+
+ def get_variables(self) -> dict[str, t.Union[str, list[str]]]:
+ """The variables to template in the bootstrapping script."""
+ return dict(
+ bootstrap_type=self.bootstrap_type,
+ controller='yes' if self.controller else '',
+ python_versions=self.python_versions,
+ ssh_key_type=self.ssh_key.KEY_TYPE,
+ ssh_private_key=self.ssh_key.key_contents,
+ ssh_public_key=self.ssh_key.pub_contents,
+ )
+
+ def get_script(self) -> str:
+ """Return a shell script to bootstrap the specified host."""
+ path = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'bootstrap.sh')
+
+ content = read_text_file(path)
+ content = set_shebang(content, '/bin/sh')
+
+ template = ShellScriptTemplate(content)
+
+ variables = self.get_variables()
+
+ script = template.substitute(**variables)
+
+ return script
+
+
+@dataclasses.dataclass
+class BootstrapDocker(Bootstrap):
+ """Bootstrap docker instances."""
+ def get_variables(self) -> dict[str, t.Union[str, list[str]]]:
+ """The variables to template in the bootstrapping script."""
+ variables = super().get_variables()
+
+ variables.update(
+ platform='',
+ platform_version='',
+ )
+
+ return variables
+
+
+@dataclasses.dataclass
+class BootstrapRemote(Bootstrap):
+ """Bootstrap remote instances."""
+ platform: str
+ platform_version: str
+
+ def get_variables(self) -> dict[str, t.Union[str, list[str]]]:
+ """The variables to template in the bootstrapping script."""
+ variables = super().get_variables()
+
+ variables.update(
+ platform=self.platform,
+ platform_version=self.platform_version,
+ )
+
+ return variables
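
Bootstrap and its subclasses layer the template variables: the base class supplies the common set and each subclass extends the dict via super().get_variables(). The same pattern in isolation, with hypothetical names:

    import dataclasses

    @dataclasses.dataclass
    class Base:
        controller: bool

        def get_variables(self) -> dict:
            return dict(controller='yes' if self.controller else '')

    @dataclasses.dataclass
    class Remote(Base):
        platform: str

        def get_variables(self) -> dict:
            variables = super().get_variables()
            variables.update(platform=self.platform)
            return variables

    assert Remote(controller=True, platform='freebsd').get_variables() == dict(controller='yes', platform='freebsd')
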
diff --git a/test/lib/ansible_test/_internal/cache.py b/test/lib/ansible_test/_internal/cache.py
new file mode 100644
index 0000000..3afe422
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cache.py
@@ -0,0 +1,31 @@
+"""Cache for commonly shared data that is intended to be immutable."""
+from __future__ import annotations
+
+import collections.abc as c
+import typing as t
+
+from .config import (
+ CommonConfig,
+)
+
+TValue = t.TypeVar('TValue')
+
+
+class CommonCache:
+ """Common cache."""
+ def __init__(self, args: CommonConfig) -> None:
+ self.args = args
+
+ def get(self, key: str, factory: c.Callable[[], TValue]) -> TValue:
+ """Return the value from the cache identified by the given key, using the specified factory method if it is not found."""
+ if key not in self.args.cache:
+ self.args.cache[key] = factory()
+
+ return self.args.cache[key]
+
+ def get_with_args(self, key: str, factory: c.Callable[[CommonConfig], TValue]) -> TValue:
+ """Return the value from the cache identified by the given key, using the specified factory method (which accepts args) if it is not found."""
+ if key not in self.args.cache:
+ self.args.cache[key] = factory(self.args)
+
+ return self.args.cache[key]
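
CommonCache stores results on the shared args.cache dict keyed by name, so any code holding the same config object sees the same values and each factory runs at most once per key. A quick sketch, assuming the class above is importable and substituting a trivial stand-in for CommonConfig:

    class FakeConfig:  # anything with a dict-valued `cache` attribute works here
        def __init__(self):
            self.cache = {}

    calls = []

    def factory():
        calls.append(1)
        return 42

    cache = CommonCache(FakeConfig())

    assert cache.get('answer', factory) == 42
    assert cache.get('answer', factory) == 42
    assert len(calls) == 1  # the factory ran only once
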
diff --git a/test/lib/ansible_test/_internal/cgroup.py b/test/lib/ansible_test/_internal/cgroup.py
new file mode 100644
index 0000000..977e359
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cgroup.py
@@ -0,0 +1,110 @@
+"""Linux control group constants, classes and utilities."""
+from __future__ import annotations
+
+import codecs
+import dataclasses
+import pathlib
+import re
+
+
+class CGroupPath:
+ """Linux cgroup path constants."""
+ ROOT = '/sys/fs/cgroup'
+ SYSTEMD = '/sys/fs/cgroup/systemd'
+ SYSTEMD_RELEASE_AGENT = '/sys/fs/cgroup/systemd/release_agent'
+
+
+class MountType:
+ """Linux filesystem mount type constants."""
+ TMPFS = 'tmpfs'
+ CGROUP_V1 = 'cgroup'
+ CGROUP_V2 = 'cgroup2'
+
+
+@dataclasses.dataclass(frozen=True)
+class CGroupEntry:
+ """A single cgroup entry parsed from '/proc/{pid}/cgroup' in the proc filesystem."""
+ id: int
+ subsystem: str
+ path: pathlib.PurePosixPath
+
+ @property
+ def root_path(self) -> pathlib.PurePosixPath:
+ """The root path for this cgroup subsystem."""
+ return pathlib.PurePosixPath(CGroupPath.ROOT, self.subsystem)
+
+ @property
+ def full_path(self) -> pathlib.PurePosixPath:
+ """The full path for this cgroup subsystem."""
+ return pathlib.PurePosixPath(self.root_path, str(self.path).lstrip('/'))
+
+ @classmethod
+ def parse(cls, value: str) -> CGroupEntry:
+ """Parse the given cgroup line from the proc filesystem and return a cgroup entry."""
+ cid, subsystem, path = value.split(':')
+
+ return cls(
+ id=int(cid),
+ subsystem=subsystem.removeprefix('name='),
+ path=pathlib.PurePosixPath(path)
+ )
+
+ @classmethod
+ def loads(cls, value: str) -> tuple[CGroupEntry, ...]:
+ """Parse the given output from the proc filesystem and return a tuple of cgroup entries."""
+ return tuple(cls.parse(line) for line in value.splitlines())
+
+
+@dataclasses.dataclass(frozen=True)
+class MountEntry:
+ """A single mount info entry parsed from '/proc/{pid}/mountinfo' in the proc filesystem."""
+ mount_id: int
+ parent_id: int
+ device_major: int
+ device_minor: int
+ root: pathlib.PurePosixPath
+ path: pathlib.PurePosixPath
+ options: tuple[str, ...]
+ fields: tuple[str, ...]
+ type: str
+ source: pathlib.PurePosixPath
+ super_options: tuple[str, ...]
+
+ @classmethod
+ def parse(cls, value: str) -> MountEntry:
+ """Parse the given mount info line from the proc filesystem and return a mount entry."""
+ # See: https://man7.org/linux/man-pages/man5/proc.5.html
+ # See: https://github.com/torvalds/linux/blob/aea23e7c464bfdec04b52cf61edb62030e9e0d0a/fs/proc_namespace.c#L135
+ mount_id, parent_id, device_major_minor, root, path, options, *remainder = value.split(' ')
+ fields = remainder[:-4]
+ separator, mtype, source, super_options = remainder[-4:]
+
+ assert separator == '-'
+
+ device_major, device_minor = device_major_minor.split(':')
+
+ return cls(
+ mount_id=int(mount_id),
+ parent_id=int(parent_id),
+ device_major=int(device_major),
+ device_minor=int(device_minor),
+ root=_decode_path(root),
+ path=_decode_path(path),
+ options=tuple(options.split(',')),
+ fields=tuple(fields),
+ type=mtype,
+ source=_decode_path(source),
+ super_options=tuple(super_options.split(',')),
+ )
+
+ @classmethod
+ def loads(cls, value: str) -> tuple[MountEntry, ...]:
+ """Parse the given output from the proc filesystem and return a tuple of mount info entries."""
+ return tuple(cls.parse(line) for line in value.splitlines())
+
+
+def _decode_path(value: str) -> pathlib.PurePosixPath:
+ """Decode and return a path which may contain octal escape sequences."""
+ # See: https://github.com/torvalds/linux/blob/aea23e7c464bfdec04b52cf61edb62030e9e0d0a/fs/proc_namespace.c#L150
+ path = re.sub(r'(\\[0-7]{3})', lambda m: codecs.decode(m.group(0).encode('ascii'), 'unicode_escape'), value)
+ return pathlib.PurePosixPath(path)
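
Both parsers accept raw text from the proc filesystem, one entry per line. A hand-written sample (not from a real host), assuming the module above is importable:

    entry = CGroupEntry.parse('12:cpu,cpuacct:/user.slice')

    assert entry.id == 12
    assert entry.subsystem == 'cpu,cpuacct'
    assert str(entry.full_path) == '/sys/fs/cgroup/cpu,cpuacct/user.slice'

    # mountinfo escapes special characters in paths as octal sequences
    assert str(_decode_path(r'/tmp/with\040space')) == '/tmp/with space'
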
diff --git a/test/lib/ansible_test/_internal/ci/__init__.py b/test/lib/ansible_test/_internal/ci/__init__.py
new file mode 100644
index 0000000..97e41da
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/__init__.py
@@ -0,0 +1,214 @@
+"""Support code for CI environments."""
+from __future__ import annotations
+
+import abc
+import base64
+import json
+import os
+import tempfile
+import typing as t
+
+from ..encoding import (
+ to_bytes,
+ to_text,
+)
+
+from ..io import (
+ read_text_file,
+ write_text_file,
+)
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ get_subclasses,
+ import_plugins,
+ raw_command,
+ cache,
+)
+
+
+class ChangeDetectionNotSupported(ApplicationError):
+ """Exception for cases where change detection is not supported."""
+
+
+class CIProvider(metaclass=abc.ABCMeta):
+ """Base class for CI provider plugins."""
+ priority = 500
+
+ @staticmethod
+ @abc.abstractmethod
+ def is_supported() -> bool:
+ """Return True if this provider is supported in the current running environment."""
+
+ @property
+ @abc.abstractmethod
+ def code(self) -> str:
+ """Return a unique code representing this provider."""
+
+ @property
+ @abc.abstractmethod
+ def name(self) -> str:
+ """Return descriptive name for this provider."""
+
+ @abc.abstractmethod
+ def generate_resource_prefix(self) -> str:
+ """Return a resource prefix specific to this CI provider."""
+
+ @abc.abstractmethod
+ def get_base_branch(self) -> str:
+ """Return the base branch or an empty string."""
+
+ @abc.abstractmethod
+ def detect_changes(self, args: TestConfig) -> t.Optional[list[str]]:
+ """Initialize change detection."""
+
+ @abc.abstractmethod
+ def supports_core_ci_auth(self) -> bool:
+ """Return True if Ansible Core CI is supported."""
+
+ @abc.abstractmethod
+ def prepare_core_ci_auth(self) -> dict[str, t.Any]:
+ """Return authentication details for Ansible Core CI."""
+
+ @abc.abstractmethod
+ def get_git_details(self, args: CommonConfig) -> t.Optional[dict[str, t.Any]]:
+ """Return details about git in the current environment."""
+
+
+@cache
+def get_ci_provider() -> CIProvider:
+ """Return a CI provider instance for the current environment."""
+ provider = None
+
+ import_plugins('ci')
+
+ candidates = sorted(get_subclasses(CIProvider), key=lambda subclass: (subclass.priority, subclass.__name__))
+
+ for candidate in candidates:
+ if candidate.is_supported():
+ provider = candidate()
+ break
+
+ if provider.code:
+ display.info('Detected CI provider: %s' % provider.name)
+
+ return provider
+
+
+class AuthHelper(metaclass=abc.ABCMeta):
+ """Public key based authentication helper for Ansible Core CI."""
+ def sign_request(self, request: dict[str, t.Any]) -> None:
+ """Sign the given auth request and make the public key available."""
+ payload_bytes = to_bytes(json.dumps(request, sort_keys=True))
+ signature_raw_bytes = self.sign_bytes(payload_bytes)
+ signature = to_text(base64.b64encode(signature_raw_bytes))
+
+ request.update(signature=signature)
+
+ def initialize_private_key(self) -> str:
+ """
+ Initialize and publish a new key pair (if needed) and return the private key.
+ The private key is cached across ansible-test invocations, so it is only generated and published once per CI job.
+ """
+ path = os.path.expanduser('~/.ansible-core-ci-private.key')
+
+ if os.path.exists(to_bytes(path)):
+ private_key_pem = read_text_file(path)
+ else:
+ private_key_pem = self.generate_private_key()
+ write_text_file(path, private_key_pem)
+
+ return private_key_pem
+
+ @abc.abstractmethod
+ def sign_bytes(self, payload_bytes: bytes) -> bytes:
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+
+ @abc.abstractmethod
+ def publish_public_key(self, public_key_pem: str) -> None:
+ """Publish the given public key."""
+
+ @abc.abstractmethod
+ def generate_private_key(self) -> str:
+ """Generate a new key pair, publishing the public key and returning the private key."""
+
+
+class CryptographyAuthHelper(AuthHelper, metaclass=abc.ABCMeta):
+ """Cryptography based public key based authentication helper for Ansible Core CI."""
+ def sign_bytes(self, payload_bytes: bytes) -> bytes:
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+ # import cryptography here to avoid overhead and failures in environments which do not use/provide it
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives.asymmetric import ec
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+
+ private_key_pem = self.initialize_private_key()
+ private_key = load_pem_private_key(to_bytes(private_key_pem), None, default_backend())
+
+ assert isinstance(private_key, ec.EllipticCurvePrivateKey)
+
+ signature_raw_bytes = private_key.sign(payload_bytes, ec.ECDSA(hashes.SHA256()))
+
+ return signature_raw_bytes
+
+ def generate_private_key(self) -> str:
+ """Generate a new key pair, publishing the public key and returning the private key."""
+ # import cryptography here to avoid overhead and failures in environments which do not use/provide it
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import serialization
+ from cryptography.hazmat.primitives.asymmetric import ec
+
+ private_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
+ public_key = private_key.public_key()
+
+ private_key_pem = to_text(private_key.private_bytes( # type: ignore[attr-defined] # documented method, but missing from type stubs
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.PKCS8,
+ encryption_algorithm=serialization.NoEncryption(),
+ ))
+
+ public_key_pem = to_text(public_key.public_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PublicFormat.SubjectPublicKeyInfo,
+ ))
+
+ self.publish_public_key(public_key_pem)
+
+ return private_key_pem
+
+
+class OpenSSLAuthHelper(AuthHelper, metaclass=abc.ABCMeta):
+ """OpenSSL based public key based authentication helper for Ansible Core CI."""
+ def sign_bytes(self, payload_bytes: bytes) -> bytes:
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+ private_key_pem = self.initialize_private_key()
+
+ with tempfile.NamedTemporaryFile() as private_key_file:
+ private_key_file.write(to_bytes(private_key_pem))
+ private_key_file.flush()
+
+ with tempfile.NamedTemporaryFile() as payload_file:
+ payload_file.write(payload_bytes)
+ payload_file.flush()
+
+ with tempfile.NamedTemporaryFile() as signature_file:
+ raw_command(['openssl', 'dgst', '-sha256', '-sign', private_key_file.name, '-out', signature_file.name, payload_file.name], capture=True)
+ signature_raw_bytes = signature_file.read()
+
+ return signature_raw_bytes
+
+ def generate_private_key(self) -> str:
+ """Generate a new key pair, publishing the public key and returning the private key."""
+ private_key_pem = raw_command(['openssl', 'ecparam', '-genkey', '-name', 'secp384r1', '-noout'], capture=True)[0]
+ public_key_pem = raw_command(['openssl', 'ec', '-pubout'], data=private_key_pem, capture=True)[0]
+
+ self.publish_public_key(public_key_pem)
+
+ return private_key_pem
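
CryptographyAuthHelper signs the JSON-serialized request with an ECDSA P-384 key; the service verifies it against the published public key. The round trip in isolation, using only the `cryptography` package (verification is shown locally here, while in practice it happens server-side):

    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import ec

    private_key = ec.generate_private_key(ec.SECP384R1())
    payload = b'{"key": "value"}'

    signature = private_key.sign(payload, ec.ECDSA(hashes.SHA256()))

    # raises cryptography.exceptions.InvalidSignature on mismatch
    private_key.public_key().verify(signature, payload, ec.ECDSA(hashes.SHA256()))
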
diff --git a/test/lib/ansible_test/_internal/ci/azp.py b/test/lib/ansible_test/_internal/ci/azp.py
new file mode 100644
index 0000000..9170dfe
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/azp.py
@@ -0,0 +1,262 @@
+"""Support code for working with Azure Pipelines."""
+from __future__ import annotations
+
+import os
+import tempfile
+import uuid
+import typing as t
+import urllib.parse
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..http import (
+ HttpClient,
+)
+
+from ..util import (
+ display,
+ MissingEnvironmentVariable,
+)
+
+from . import (
+ ChangeDetectionNotSupported,
+ CIProvider,
+ CryptographyAuthHelper,
+)
+
+CODE = 'azp'
+
+
+class AzurePipelines(CIProvider):
+ """CI provider implementation for Azure Pipelines."""
+ def __init__(self) -> None:
+ self.auth = AzurePipelinesAuthHelper()
+
+ @staticmethod
+ def is_supported() -> bool:
+ """Return True if this provider is supported in the current running environment."""
+ return os.environ.get('SYSTEM_COLLECTIONURI', '').startswith('https://dev.azure.com/')
+
+ @property
+ def code(self) -> str:
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self) -> str:
+ """Return descriptive name for this provider."""
+ return 'Azure Pipelines'
+
+ def generate_resource_prefix(self) -> str:
+ """Return a resource prefix specific to this CI provider."""
+ try:
+ prefix = 'azp-%s-%s-%s' % (
+ os.environ['BUILD_BUILDID'],
+ os.environ['SYSTEM_JOBATTEMPT'],
+ os.environ['SYSTEM_JOBIDENTIFIER'],
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ return prefix
+
+ def get_base_branch(self) -> str:
+ """Return the base branch or an empty string."""
+ base_branch = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH') or os.environ.get('BUILD_SOURCEBRANCHNAME')
+
+ if base_branch:
+ base_branch = 'origin/%s' % base_branch
+
+ return base_branch or ''
+
+ def detect_changes(self, args: TestConfig) -> t.Optional[list[str]]:
+ """Initialize change detection."""
+ result = AzurePipelinesChanges(args)
+
+ if result.is_pr:
+ job_type = 'pull request'
+ else:
+ job_type = 'merge commit'
+
+ display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ if result.paths is None:
+ # There are several likely causes of this:
+ # - First run on a new branch.
+            # - Too many pull requests have been merged since the last successful merge run.
+ display.warning('No successful commit found. All tests will be executed.')
+
+ return result.paths
+
+ def supports_core_ci_auth(self) -> bool:
+ """Return True if Ansible Core CI is supported."""
+ return True
+
+ def prepare_core_ci_auth(self) -> dict[str, t.Any]:
+ """Return authentication details for Ansible Core CI."""
+ try:
+ request = dict(
+ org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1],
+ project_name=os.environ['SYSTEM_TEAMPROJECT'],
+ build_id=int(os.environ['BUILD_BUILDID']),
+ task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])),
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ self.auth.sign_request(request)
+
+ auth = dict(
+ azp=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args: CommonConfig) -> t.Optional[dict[str, t.Any]]:
+ """Return details about git in the current environment."""
+ changes = AzurePipelinesChanges(args)
+
+ details = dict(
+ base_commit=changes.base_commit,
+ commit=changes.commit,
+ )
+
+ return details
+
+
+class AzurePipelinesAuthHelper(CryptographyAuthHelper):
+ """
+ Authentication helper for Azure Pipelines.
+ Based on cryptography since it is provided by the default Azure Pipelines environment.
+ """
+ def publish_public_key(self, public_key_pem: str) -> None:
+ """Publish the given public key."""
+ try:
+ agent_temp_directory = os.environ['AGENT_TEMPDIRECTORY']
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ # the temporary file cannot be deleted because we do not know when the agent has processed it
+ # placing the file in the agent's temp directory allows it to be picked up when the job is running in a container
+ with tempfile.NamedTemporaryFile(prefix='public-key-', suffix='.pem', delete=False, dir=agent_temp_directory) as public_key_file:
+ public_key_file.write(to_bytes(public_key_pem))
+ public_key_file.flush()
+
+ # make the agent aware of the public key by declaring it as an attachment
+ vso_add_attachment('ansible-core-ci', 'public-key.pem', public_key_file.name)
+
+
+class AzurePipelinesChanges:
+ """Change information for an Azure Pipelines build."""
+ def __init__(self, args: CommonConfig) -> None:
+ self.args = args
+ self.git = Git()
+
+ try:
+ self.org_uri = os.environ['SYSTEM_COLLECTIONURI'] # ex: https://dev.azure.com/{org}/
+ self.project = os.environ['SYSTEM_TEAMPROJECT']
+ self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER'] # ex: GitHub
+ self.source_branch = os.environ['BUILD_SOURCEBRANCH']
+ self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
+ self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ if self.source_branch.startswith('refs/tags/'):
+ raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
+
+ self.org = self.org_uri.strip('/').split('/')[-1]
+ self.is_pr = self.pr_branch_name is not None
+
+ if self.is_pr:
+ # HEAD is a merge commit of the PR branch into the target branch
+ # HEAD^1 is HEAD of the target branch (first parent of merge commit)
+ # HEAD^2 is HEAD of the PR branch (second parent of merge commit)
+ # see: https://git-scm.com/docs/gitrevisions
+ self.branch = self.pr_branch_name
+ self.base_commit = 'HEAD^1'
+ self.commit = 'HEAD^2'
+ else:
+ commits = self.get_successful_merge_run_commits()
+
+ self.branch = self.source_branch_name
+ self.base_commit = self.get_last_successful_commit(commits)
+ self.commit = 'HEAD'
+
+ self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
+
+ if self.base_commit:
+ self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
+
+ # <commit>...<commit>
+ # This form is to view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>.
+ # see: https://git-scm.com/docs/git-diff
+ dot_range = '%s...%s' % (self.base_commit, self.commit)
+
+ self.paths = sorted(self.git.get_diff_names([dot_range]))
+ self.diff = self.git.get_diff([dot_range])
+ else:
+ self.paths = None # act as though change detection not enabled, do not filter targets
+ self.diff = []
+
+ def get_successful_merge_run_commits(self) -> set[str]:
+ """Return a set of recent successsful merge commits from Azure Pipelines."""
+ parameters = dict(
+ maxBuildsPerDefinition=100, # max 5000
+ queryOrder='queueTimeDescending', # assumes under normal circumstances that later queued jobs are for later commits
+ resultFilter='succeeded',
+ reasonFilter='batchedCI', # may miss some non-PR reasons, the alternative is to filter the list after receiving it
+ repositoryType=self.repo_type,
+ repositoryId='%s/%s' % (self.org, self.project),
+ )
+
+ url = '%s%s/_apis/build/builds?api-version=6.0&%s' % (self.org_uri, self.project, urllib.parse.urlencode(parameters))
+
+ http = HttpClient(self.args, always=True)
+ response = http.get(url)
+
+ # noinspection PyBroadException
+ try:
+ result = response.json()
+ except Exception: # pylint: disable=broad-except
+ # most likely due to a private project, which returns an HTTP 203 response with HTML
+ display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
+ return set()
+
+ commits = set(build['sourceVersion'] for build in result['value'])
+
+ return commits
+
+ def get_last_successful_commit(self, commits: set[str]) -> t.Optional[str]:
+ """Return the last successful commit from git history that is found in the given commit list, or None."""
+ commit_history = self.git.get_rev_list(max_count=100)
+ ordered_successful_commits = [commit for commit in commit_history if commit in commits]
+ last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
+ return last_successful_commit
+
+
+def vso_add_attachment(file_type: str, file_name: str, path: str) -> None:
+ """Upload and attach a file to the current timeline record."""
+ vso('task.addattachment', dict(type=file_type, name=file_name), path)
+
+
+def vso(name: str, data: dict[str, str], message: str) -> None:
+ """
+ Write a logging command for the Azure Pipelines agent to process.
+ See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
+ """
+ display.info('##vso[%s %s]%s' % (name, ';'.join('='.join((key, value)) for key, value in data.items()), message))
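
The vso() helper emits Azure Pipelines logging commands on stdout, which the agent parses out of the build output. What vso_add_attachment() produces, with a hypothetical file path:

    data = dict(type='ansible-core-ci', name='public-key.pem')
    path = '/agent/_temp/public-key-x.pem'  # hypothetical attachment path

    line = '##vso[task.addattachment %s]%s' % (';'.join('='.join((key, value)) for key, value in data.items()), path)

    assert line == '##vso[task.addattachment type=ansible-core-ci;name=public-key.pem]/agent/_temp/public-key-x.pem'
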
diff --git a/test/lib/ansible_test/_internal/ci/local.py b/test/lib/ansible_test/_internal/ci/local.py
new file mode 100644
index 0000000..ec03194
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/local.py
@@ -0,0 +1,212 @@
+"""Support code for working without a supported CI provider."""
+from __future__ import annotations
+
+import os
+import platform
+import random
+import re
+import typing as t
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ is_binary_file,
+ SubprocessError,
+)
+
+from . import (
+ CIProvider,
+)
+
+CODE = '' # not really a CI provider, so use an empty string for the code
+
+
+class Local(CIProvider):
+ """CI provider implementation when not using CI."""
+ priority = 1000
+
+ @staticmethod
+ def is_supported() -> bool:
+ """Return True if this provider is supported in the current running environment."""
+ return True
+
+ @property
+ def code(self) -> str:
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self) -> str:
+ """Return descriptive name for this provider."""
+ return 'Local'
+
+ def generate_resource_prefix(self) -> str:
+ """Return a resource prefix specific to this CI provider."""
+ prefix = 'ansible-test-%d-%s' % (
+ random.randint(10000000, 99999999),
+ platform.node().split('.')[0],
+ )
+
+ return prefix
+
+ def get_base_branch(self) -> str:
+ """Return the base branch or an empty string."""
+ return ''
+
+ def detect_changes(self, args: TestConfig) -> t.Optional[list[str]]:
+ """Initialize change detection."""
+ result = LocalChanges(args)
+
+ display.info('Detected branch %s forked from %s at commit %s' % (
+ result.current_branch, result.fork_branch, result.fork_point))
+
+ if result.untracked and not args.untracked:
+ display.warning('Ignored %s untracked file(s). Use --untracked to include them.' %
+ len(result.untracked))
+
+ if result.committed and not args.committed:
+ display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' %
+ len(result.committed))
+
+ if result.staged and not args.staged:
+ display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' %
+ len(result.staged))
+
+ if result.unstaged and not args.unstaged:
+ display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' %
+ len(result.unstaged))
+
+ names = set()
+
+ if args.tracked:
+ names |= set(result.tracked)
+ if args.untracked:
+ names |= set(result.untracked)
+ if args.committed:
+ names |= set(result.committed)
+ if args.staged:
+ names |= set(result.staged)
+ if args.unstaged:
+ names |= set(result.unstaged)
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ for path in result.untracked:
+ if is_binary_file(path):
+ args.metadata.changes[path] = ((0, 0),)
+ continue
+
+ line_count = len(read_text_file(path).splitlines())
+
+ args.metadata.changes[path] = ((1, line_count),)
+
+ return sorted(names)
+
+ def supports_core_ci_auth(self) -> bool:
+ """Return True if Ansible Core CI is supported."""
+ path = self._get_aci_key_path()
+ return os.path.exists(path)
+
+ def prepare_core_ci_auth(self) -> dict[str, t.Any]:
+ """Return authentication details for Ansible Core CI."""
+ path = self._get_aci_key_path()
+ auth_key = read_text_file(path).strip()
+
+ request = dict(
+ key=auth_key,
+ nonce=None,
+ )
+
+ auth = dict(
+ remote=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args: CommonConfig) -> t.Optional[dict[str, t.Any]]:
+ """Return details about git in the current environment."""
+ return None # not yet implemented for local
+
+ @staticmethod
+ def _get_aci_key_path() -> str:
+ path = os.path.expanduser('~/.ansible-core-ci.key')
+ return path
+
+
+class InvalidBranch(ApplicationError):
+ """Exception for invalid branch specification."""
+ def __init__(self, branch: str, reason: str) -> None:
+ message = 'Invalid branch: %s\n%s' % (branch, reason)
+
+ super().__init__(message)
+
+ self.branch = branch
+
+
+class LocalChanges:
+ """Change information for local work."""
+ def __init__(self, args: TestConfig) -> None:
+ self.args = args
+ self.git = Git()
+
+ self.current_branch = self.git.get_branch()
+
+ if self.is_official_branch(self.current_branch):
+ raise InvalidBranch(branch=self.current_branch,
+ reason='Current branch is not a feature branch.')
+
+ self.fork_branch = None
+ self.fork_point = None
+
+ self.local_branches = sorted(self.git.get_branches())
+ self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
+
+ for self.fork_branch in self.official_branches:
+ try:
+ self.fork_point = self.git.get_branch_fork_point(self.fork_branch)
+ break
+ except SubprocessError:
+ pass
+
+ if self.fork_point is None:
+ raise ApplicationError('Unable to auto-detect fork branch and fork point.')
+
+ # tracked files (including unchanged)
+ self.tracked = sorted(self.git.get_file_names(['--cached']))
+ # untracked files (except ignored)
+ self.untracked = sorted(self.git.get_file_names(['--others', '--exclude-standard']))
+ # tracked changes (including deletions) committed since the branch was forked
+ self.committed = sorted(self.git.get_diff_names([self.fork_point, 'HEAD']))
+ # tracked changes (including deletions) which are staged
+ self.staged = sorted(self.git.get_diff_names(['--cached']))
+ # tracked changes (including deletions) which are not staged
+ self.unstaged = sorted(self.git.get_diff_names([]))
+ # diff of all tracked files from fork point to working copy
+ self.diff = self.git.get_diff([self.fork_point])
+
+ def is_official_branch(self, name: str) -> bool:
+ """Return True if the given branch name an official branch for development or releases."""
+ if self.args.base_branch:
+ return name == self.args.base_branch
+
+ if name == 'devel':
+ return True
+
+ if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
+ return True
+
+ return False
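
LocalChanges tries each official branch in order and keeps the first one for which git can report a fork point, treating SubprocessError as "try the next branch". A rough stand-alone equivalent using plain git commands (the Git wrapper used above may invoke different flags):

    import subprocess

    def fork_point(branch: str) -> str:
        # common ancestor of HEAD and the given branch; raises CalledProcessError on failure
        result = subprocess.run(['git', 'merge-base', '--fork-point', branch, 'HEAD'],
                                capture_output=True, check=True, text=True)
        return result.stdout.strip()

    for candidate in ('devel', 'stable-2.14'):  # hypothetical official branches
        try:
            print(candidate, fork_point(candidate))
            break
        except subprocess.CalledProcessError:
            continue  # mirrors the SubprocessError fallback above
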
diff --git a/test/lib/ansible_test/_internal/classification/__init__.py b/test/lib/ansible_test/_internal/classification/__init__.py
new file mode 100644
index 0000000..aacc2ca
--- /dev/null
+++ b/test/lib/ansible_test/_internal/classification/__init__.py
@@ -0,0 +1,900 @@
+"""Classify changes in Ansible code."""
+from __future__ import annotations
+
+import collections
+import os
+import re
+import time
+import typing as t
+
+from ..target import (
+ walk_module_targets,
+ walk_integration_targets,
+ walk_units_targets,
+ walk_compile_targets,
+ walk_sanity_targets,
+ load_integration_prefixes,
+ analyze_integration_target_dependencies,
+ IntegrationTarget,
+)
+
+from ..util import (
+ display,
+ is_subdir,
+)
+
+from .python import (
+ get_python_module_utils_imports,
+ get_python_module_utils_name,
+)
+
+from .csharp import (
+ get_csharp_module_utils_imports,
+ get_csharp_module_utils_name,
+)
+
+from .powershell import (
+ get_powershell_module_utils_imports,
+ get_powershell_module_utils_name,
+)
+
+from ..config import (
+ TestConfig,
+ IntegrationConfig,
+)
+
+from ..metadata import (
+ ChangeDescription,
+)
+
+from ..data import (
+ data_context,
+)
+
+FOCUSED_TARGET = '__focused__'
+
+
+def categorize_changes(args: TestConfig, paths: list[str], verbose_command: t.Optional[str] = None) -> ChangeDescription:
+ """Categorize the given list of changed paths and return a description of the changes."""
+ mapper = PathMapper(args)
+
+ commands: dict[str, set[str]] = {
+ 'sanity': set(),
+ 'units': set(),
+ 'integration': set(),
+ 'windows-integration': set(),
+ 'network-integration': set(),
+ }
+
+ focused_commands = collections.defaultdict(set)
+
+ deleted_paths: set[str] = set()
+ original_paths: set[str] = set()
+ additional_paths: set[str] = set()
+ no_integration_paths: set[str] = set()
+
+ for path in paths:
+ if not os.path.exists(path):
+ deleted_paths.add(path)
+ continue
+
+ original_paths.add(path)
+
+ dependent_paths = mapper.get_dependent_paths(path)
+
+ if not dependent_paths:
+ continue
+
+ display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
+
+ for dependent_path in dependent_paths:
+ display.info(dependent_path, verbosity=2)
+ additional_paths.add(dependent_path)
+
+ additional_paths -= set(paths) # don't count changed paths as additional paths
+
+ if additional_paths:
+ display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
+ paths = sorted(set(paths) | additional_paths)
+
+ display.info('Mapping %d changed file(s) to tests.' % len(paths))
+
+ none_count = 0
+
+ for path in paths:
+ tests = mapper.classify(path)
+
+ if tests is None:
+ focused_target = False
+
+ display.info('%s -> all' % path, verbosity=1)
+ tests = all_tests(args) # not categorized, run all tests
+ display.warning('Path not categorized: %s' % path)
+ else:
+ focused_target = bool(tests.pop(FOCUSED_TARGET, None)) and path in original_paths
+
+ tests = dict((key, value) for key, value in tests.items() if value)
+
+ if focused_target and not any('integration' in command for command in tests):
+ no_integration_paths.add(path) # path triggers no integration tests
+
+ if verbose_command:
+ result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
+
+ # identify targeted integration tests (those which only target a single integration command)
+ if 'integration' in verbose_command and tests.get(verbose_command):
+ if not any('integration' in command for command in tests if command != verbose_command):
+ if focused_target:
+ result += ' (focused)'
+
+ result += ' (targeted)'
+ else:
+ result = '%s' % tests
+
+ if not tests.get(verbose_command):
+ # minimize excessive output from potentially thousands of files which do not trigger tests
+ none_count += 1
+ verbosity = 2
+ else:
+ verbosity = 1
+
+ if args.verbosity >= verbosity:
+ display.info('%s -> %s' % (path, result), verbosity=1)
+
+ for command, target in tests.items():
+ commands[command].add(target)
+
+ if focused_target:
+ focused_commands[command].add(target)
+
+ if none_count > 0 and args.verbosity < 2:
+ display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
+
+ for command, targets in commands.items():
+ targets.discard('none')
+
+ if any(target == 'all' for target in targets):
+ commands[command] = {'all'}
+
+ sorted_commands = dict((cmd, sorted(targets)) for cmd, targets in commands.items() if targets)
+ focused_commands = dict((cmd, sorted(targets)) for cmd, targets in focused_commands.items())
+
+ for command, targets in sorted_commands.items():
+ if targets == ['all']:
+ sorted_commands[command] = [] # changes require testing all targets, do not filter targets
+
+ changes = ChangeDescription()
+ changes.command = verbose_command
+ changes.changed_paths = sorted(original_paths)
+ changes.deleted_paths = sorted(deleted_paths)
+ changes.regular_command_targets = sorted_commands
+ changes.focused_command_targets = focused_commands
+ changes.no_integration_paths = sorted(no_integration_paths)
+
+ return changes
+
+
+class PathMapper:
+ """Map file paths to test commands and targets."""
+ def __init__(self, args: TestConfig) -> None:
+ self.args = args
+ self.integration_all_target = get_integration_all_target(self.args)
+
+ self.integration_targets = list(walk_integration_targets())
+ self.module_targets = list(walk_module_targets())
+ self.compile_targets = list(walk_compile_targets())
+ self.units_targets = list(walk_units_targets())
+ self.sanity_targets = list(walk_sanity_targets())
+ self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')]
+ self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs']
+
+ self.units_modules = set(target.module for target in self.units_targets if target.module)
+ self.units_paths = set(a for target in self.units_targets for a in target.aliases)
+ self.sanity_paths = set(target.path for target in self.sanity_targets)
+
+ self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets)
+ self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets)
+ self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases)
+
+ self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'posix/' in target.aliases for m in target.modules)
+ self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'windows/' in target.aliases for m in target.modules)
+ self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'network/' in target.aliases for m in target.modules)
+
+ self.prefixes = load_integration_prefixes()
+ self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
+
+ self.python_module_utils_imports: dict[str, set[str]] = {} # populated on first use to reduce overhead when not needed
+ self.powershell_module_utils_imports: dict[str, set[str]] = {} # populated on first use to reduce overhead when not needed
+ self.csharp_module_utils_imports: dict[str, set[str]] = {} # populated on first use to reduce overhead when not needed
+
+ self.paths_to_dependent_targets: dict[str, set[IntegrationTarget]] = {}
+
+ for target in self.integration_targets:
+ for path in target.needs_file:
+ if path not in self.paths_to_dependent_targets:
+ self.paths_to_dependent_targets[path] = set()
+
+ self.paths_to_dependent_targets[path].add(target)
+
+ def get_dependent_paths(self, path: str) -> list[str]:
+ """Return a list of paths which depend on the given path, recursively expanding dependent paths as well."""
+ unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
+ paths = set()
+
+ while unprocessed_paths:
+ queued_paths = list(unprocessed_paths)
+ paths |= unprocessed_paths
+ unprocessed_paths = set()
+
+ for queued_path in queued_paths:
+ new_paths = self.get_dependent_paths_non_recursive(queued_path)
+
+ for new_path in new_paths:
+ if new_path not in paths:
+ unprocessed_paths.add(new_path)
+
+ return sorted(paths)
+
+ def get_dependent_paths_non_recursive(self, path: str) -> list[str]:
+ """Return a list of paths which depend on the given path, including dependent integration test target paths."""
+ paths = self.get_dependent_paths_internal(path)
+ paths += [target.path + '/' for target in self.paths_to_dependent_targets.get(path, set())]
+ paths = sorted(set(paths))
+
+ return paths
+
+ def get_dependent_paths_internal(self, path: str) -> list[str]:
+ """Return a list of paths which depend on the given path."""
+ ext = os.path.splitext(os.path.split(path)[1])[1]
+
+ if is_subdir(path, data_context().content.module_utils_path):
+ if ext == '.py':
+ return self.get_python_module_utils_usage(path)
+
+ if ext == '.psm1':
+ return self.get_powershell_module_utils_usage(path)
+
+ if ext == '.cs':
+ return self.get_csharp_module_utils_usage(path)
+
+ if is_subdir(path, data_context().content.integration_targets_path):
+ return self.get_integration_target_usage(path)
+
+ return []
+
+ def get_python_module_utils_usage(self, path: str) -> list[str]:
+ """Return a list of paths which depend on the given path which is a Python module_utils file."""
+ if not self.python_module_utils_imports:
+ display.info('Analyzing python module_utils imports...')
+ before = time.time()
+ self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
+ after = time.time()
+ display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
+
+ name = get_python_module_utils_name(path)
+
+ return sorted(self.python_module_utils_imports[name])
+
+ def get_powershell_module_utils_usage(self, path: str) -> list[str]:
+ """Return a list of paths which depend on the given path which is a PowerShell module_utils file."""
+ if not self.powershell_module_utils_imports:
+ display.info('Analyzing powershell module_utils imports...')
+ before = time.time()
+ self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
+ after = time.time()
+ display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
+
+ name = get_powershell_module_utils_name(path)
+
+ return sorted(self.powershell_module_utils_imports[name])
+
+ def get_csharp_module_utils_usage(self, path: str) -> list[str]:
+ """Return a list of paths which depend on the given path which is a C# module_utils file."""
+ if not self.csharp_module_utils_imports:
+ display.info('Analyzing C# module_utils imports...')
+ before = time.time()
+ self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
+ after = time.time()
+ display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
+
+ name = get_csharp_module_utils_name(path)
+
+ return sorted(self.csharp_module_utils_imports[name])
+
+ def get_integration_target_usage(self, path: str) -> list[str]:
+ """Return a list of paths which depend on the given path which is an integration target file."""
+ target_name = path.split('/')[3]
+ dependents = [os.path.join(data_context().content.integration_targets_path, target) + os.path.sep
+ for target in sorted(self.integration_dependencies.get(target_name, set()))]
+
+ return dependents
+
+ def classify(self, path: str) -> t.Optional[dict[str, str]]:
+ """Classify the given path and return an optional dictionary of the results."""
+ result = self._classify(path)
+
+ # run all tests when no result given
+ if result is None:
+ return None
+
+ # run sanity on path unless result specified otherwise
+ if path in self.sanity_paths and 'sanity' not in result:
+ result['sanity'] = path
+
+ return result
+
+ def _classify(self, path: str) -> t.Optional[dict[str, str]]:
+ """Return the classification for the given path."""
+ if data_context().content.is_ansible:
+ return self._classify_ansible(path)
+
+ if data_context().content.collection:
+ return self._classify_collection(path)
+
+ return None
+
+ def _classify_common(self, path: str) -> t.Optional[dict[str, str]]:
+ """Return the classification for the given path using rules common to all layouts."""
+ dirname = os.path.dirname(path)
+ filename = os.path.basename(path)
+ name, ext = os.path.splitext(filename)
+
+ minimal: dict[str, str] = {}
+
+ if os.path.sep not in path:
+ if filename in (
+ 'azure-pipelines.yml',
+ ):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if is_subdir(path, '.azure-pipelines'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if is_subdir(path, '.github'):
+ return minimal
+
+ if is_subdir(path, data_context().content.integration_targets_path):
+ if not os.path.exists(path):
+ return minimal
+
+ target = self.integration_targets_by_name.get(path.split('/')[3])
+
+ if not target:
+ display.warning('Unexpected non-target found: %s' % path)
+ return minimal
+
+ if 'hidden/' in target.aliases:
+ return minimal # already expanded using get_dependent_paths
+
+ return {
+ 'integration': target.name if 'posix/' in target.aliases else None,
+ 'windows-integration': target.name if 'windows/' in target.aliases else None,
+ 'network-integration': target.name if 'network/' in target.aliases else None,
+ FOCUSED_TARGET: target.name,
+ }
+
+ if is_subdir(path, data_context().content.integration_path):
+ if dirname == data_context().content.integration_path:
+ for command in (
+ 'integration',
+ 'windows-integration',
+ 'network-integration',
+ ):
+ if name == command and ext == '.cfg':
+ return {
+ command: self.integration_all_target,
+ }
+
+ if name == command + '.requirements' and ext == '.txt':
+ return {
+ command: self.integration_all_target,
+ }
+
+ return {
+ 'integration': self.integration_all_target,
+ 'windows-integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ }
+
+ if is_subdir(path, data_context().content.sanity_path):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ }
+
+ if is_subdir(path, data_context().content.unit_path):
+ if path in self.units_paths:
+ return {
+ 'units': path,
+ }
+
+ # changes to files which are not unit tests should trigger tests from the nearest parent directory
+
+ test_path = os.path.dirname(path)
+
+ while test_path:
+ if test_path + '/' in self.units_paths:
+ return {
+ 'units': test_path + '/',
+ }
+
+ test_path = os.path.dirname(test_path)
+
+ if is_subdir(path, data_context().content.module_path):
+ module_name = self.module_names_by_path.get(path)
+
+ if module_name:
+ return {
+ 'units': module_name if module_name in self.units_modules else None,
+ 'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
+ 'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
+ 'network-integration': self.network_integration_by_module.get(module_name),
+ FOCUSED_TARGET: module_name,
+ }
+
+ return minimal
+
+ if is_subdir(path, data_context().content.module_utils_path):
+ if ext == '.cs':
+ return minimal # already expanded using get_dependent_paths
+
+ if ext == '.psm1':
+ return minimal # already expanded using get_dependent_paths
+
+ if ext == '.py':
+ return minimal # already expanded using get_dependent_paths
+
+ if is_subdir(path, data_context().content.plugin_paths['action']):
+ if ext == '.py':
+ if name.startswith('net_'):
+ network_target = 'network/.*_%s' % name[4:]
+
+ if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': 'all',
+ }
+
+ if self.prefixes.get(name) == 'network':
+ network_platform = name
+ elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
+ network_platform = name[:-7]
+ elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
+ network_platform = name[:-9]
+ else:
+ network_platform = None
+
+ if network_platform:
+ network_target = 'network/%s/' % network_platform
+
+ if network_target in self.integration_targets_by_alias:
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
+
+ return {
+ 'units': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['connection']):
+ units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'connection')
+ if name == '__init__':
+ return {
+ 'integration': self.integration_all_target,
+ 'windows-integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ 'units': os.path.join(units_dir, ''),
+ }
+
+ units_path = os.path.join(units_dir, 'test_%s.py' % name)
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ integration_name = 'connection_%s' % name
+
+ if integration_name not in self.integration_targets_by_name:
+ integration_name = None
+
+ windows_integration_name = 'connection_windows_%s' % name
+
+ if windows_integration_name not in self.integration_targets_by_name:
+ windows_integration_name = None
+
+ # entire integration test commands depend on these connection plugins
+
+ if name in ['winrm', 'psrp']:
+ return {
+ 'windows-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'local':
+ return {
+ 'integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'network_cli':
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'paramiko_ssh':
+ return {
+ 'integration': integration_name,
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ # other connection plugins have isolated integration and unit tests
+
+ return {
+ 'integration': integration_name,
+ 'windows-integration': windows_integration_name,
+ 'units': units_path,
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['doc_fragments']):
+ return {
+ 'sanity': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['inventory']):
+ if name == '__init__':
+ return all_tests(self.args) # broad impact, run all tests
+
+ # These inventory plugins are enabled by default (see INVENTORY_ENABLED).
+ # Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
+ test_all = [
+ 'host_list',
+ 'script',
+ 'yaml',
+ 'ini',
+ 'auto',
+ ]
+
+ if name in test_all:
+ posix_integration_fallback = get_integration_all_target(self.args)
+ else:
+ posix_integration_fallback = None
+
+ target = self.integration_targets_by_name.get('inventory_%s' % name)
+ units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'inventory')
+ units_path = os.path.join(units_dir, 'test_%s.py' % name)
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return {
+ 'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
+ 'windows-integration': target.name if target and 'windows/' in target.aliases else None,
+ 'network-integration': target.name if target and 'network/' in target.aliases else None,
+ 'units': units_path,
+ FOCUSED_TARGET: target.name if target else None,
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['filter']):
+ return self._simple_plugin_tests('filter', name)
+
+ if is_subdir(path, data_context().content.plugin_paths['lookup']):
+ return self._simple_plugin_tests('lookup', name)
+
+ if (is_subdir(path, data_context().content.plugin_paths['terminal']) or
+ is_subdir(path, data_context().content.plugin_paths['cliconf']) or
+ is_subdir(path, data_context().content.plugin_paths['netconf'])):
+ if ext == '.py':
+ if name in self.prefixes and self.prefixes[name] == 'network':
+ network_target = 'network/%s/' % name
+
+ if network_target in self.integration_targets_by_alias:
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
+
+ return {
+ 'units': 'all',
+ }
+
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['test']):
+ return self._simple_plugin_tests('test', name)
+
+ return None
+
+ def _classify_collection(self, path: str) -> t.Optional[dict[str, str]]:
+ """Return the classification for the given path using rules specific to collections."""
+ result = self._classify_common(path)
+
+ if result is not None:
+ return result
+
+ filename = os.path.basename(path)
+ dummy, ext = os.path.splitext(filename)
+
+ minimal: dict[str, str] = {}
+
+ if path.startswith('changelogs/'):
+ return minimal
+
+ if path.startswith('docs/'):
+ return minimal
+
+ if '/' not in path:
+ if path in (
+ '.gitignore',
+ 'COPYING',
+ 'LICENSE',
+ 'Makefile',
+ ):
+ return minimal
+
+ if ext in (
+ '.in',
+ '.md',
+ '.rst',
+ '.toml',
+ '.txt',
+ ):
+ return minimal
+
+ return None
+
+ def _classify_ansible(self, path: str) -> t.Optional[dict[str, str]]:
+ """Return the classification for the given path using rules specific to Ansible."""
+ if path.startswith('test/units/compat/'):
+ return {
+ 'units': 'test/units/',
+ }
+
+ result = self._classify_common(path)
+
+ if result is not None:
+ return result
+
+ dirname = os.path.dirname(path)
+ filename = os.path.basename(path)
+ name, ext = os.path.splitext(filename)
+
+ minimal: dict[str, str] = {}
+
+ if path.startswith('bin/'):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if path.startswith('changelogs/'):
+ return minimal
+
+ if path.startswith('docs/'):
+ return minimal
+
+ if path.startswith('examples/'):
+ if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
+ return {
+ 'windows-integration': 'connection_winrm',
+ }
+
+ return minimal
+
+ if path.startswith('hacking/'):
+ return minimal
+
+ if path.startswith('lib/ansible/executor/powershell/'):
+ units_path = 'test/units/executor/powershell/'
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return {
+ 'windows-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if path.startswith('lib/ansible/'):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if path.startswith('licenses/'):
+ return minimal
+
+ if path.startswith('packaging/'):
+ return minimal
+
+ if path.startswith('test/ansible_test/'):
+ return minimal # these tests are not invoked from ansible-test
+
+ if path.startswith('test/lib/ansible_test/config/'):
+ if name.startswith('cloud-config-'):
+ cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/completion/'):
+ if path == 'test/lib/ansible_test/_data/completion/docker.txt':
+ return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
+
+ if path.startswith('test/lib/ansible_test/_internal/commands/integration/cloud/'):
+ cloud_target = 'cloud/%s/' % name
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/lib/ansible_test/_internal/commands/sanity/'):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ 'integration': 'ansible-test/', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_internal/commands/units/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test/', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/requirements/'):
+ if name in (
+ 'integration',
+ 'network-integration',
+ 'windows-integration',
+ ):
+ return {
+ name: self.integration_all_target,
+ }
+
+ if name in (
+ 'sanity',
+ 'units',
+ ):
+ return {
+ name: 'all',
+ }
+
+ if path.startswith('test/lib/ansible_test/_util/controller/sanity/') or path.startswith('test/lib/ansible_test/_util/target/sanity/'):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ 'integration': 'ansible-test/', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_util/target/pytest/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test/', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/support/'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/utils/shippable/'):
+ if dirname == 'test/utils/shippable':
+ test_map = {
+ 'cloud.sh': 'integration:cloud/',
+ 'linux.sh': 'integration:all',
+ 'network.sh': 'network-integration:all',
+ 'remote.sh': 'integration:all',
+ 'sanity.sh': 'sanity:all',
+ 'units.sh': 'units:all',
+ 'windows.sh': 'windows-integration:all',
+ }
+
+ test_match = test_map.get(filename)
+
+ if test_match:
+ test_command, test_target = test_match.split(':')
+
+ return {
+ test_command: test_target,
+ }
+
+ cloud_target = 'cloud/%s/' % name
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/utils/'):
+ return minimal
+
+ if '/' not in path:
+ if path in (
+ '.gitattributes',
+ '.gitignore',
+ '.mailmap',
+ 'COPYING',
+ 'Makefile',
+ ):
+ return minimal
+
+ if path in (
+ 'setup.py',
+ ):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if ext in (
+ '.in',
+ '.md',
+ '.rst',
+ '.toml',
+ '.txt',
+ ):
+ return minimal
+
+ return None # unknown, will result in fall-back to run all tests
+
+ def _simple_plugin_tests(self, plugin_type: str, plugin_name: str) -> dict[str, t.Optional[str]]:
+ """
+ Return tests for the given plugin type and plugin name.
+ This function is useful for plugin types which do not require special processing.
+ """
+ if plugin_name == '__init__':
+ return all_tests(self.args, True)
+
+ integration_target = self.integration_targets_by_name.get('%s_%s' % (plugin_type, plugin_name))
+
+ if integration_target:
+ integration_name = integration_target.name
+ else:
+ integration_name = None
+
+ units_path = os.path.join(data_context().content.unit_path, 'plugins', plugin_type, 'test_%s.py' % plugin_name)
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return dict(
+ integration=integration_name,
+ units=units_path,
+ )
+
+
+def all_tests(args: TestConfig, force: bool = False) -> dict[str, str]:
+ """Return the targets for each test command when all tests should be run."""
+ if force:
+ integration_all_target = 'all'
+ else:
+ integration_all_target = get_integration_all_target(args)
+
+ return {
+ 'sanity': 'all',
+ 'units': 'all',
+ 'integration': integration_all_target,
+ 'windows-integration': integration_all_target,
+ 'network-integration': integration_all_target,
+ }
+
+
+def get_integration_all_target(args: TestConfig) -> str:
+ """Return the target to use when all tests should be run."""
+ if isinstance(args, IntegrationConfig):
+ return args.changed_all_target
+
+ return 'all'
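
The classifier above is, at its core, a layout-aware lookup from changed paths to the test commands that must run, with None signalling "unknown, run everything". A minimal standalone sketch of that idea, using an invented rule table rather than the real rules:

    from __future__ import annotations

    # Hypothetical rule table: (path prefix, tests to run). An empty dict
    # means "minimal" (nothing to run); None means "unknown, run everything".
    RULES = (
        ('docs/', {}),
        ('lib/myproject/', {'sanity': 'all', 'units': 'all'}),
        ('test/units/', {'units': 'all'}),
    )

    def classify(path: str) -> dict[str, str] | None:
        """Return the tests to run for a changed path, or None if unknown."""
        for prefix, tests in RULES:
            if path.startswith(prefix):
                return dict(tests)
        return None  # callers treat None as "run all tests"

    print(classify('lib/myproject/api.py'))  # {'sanity': 'all', 'units': 'all'}
    print(classify('docs/intro.rst'))        # {} -> nothing to run
    print(classify('setup.py'))              # None -> run everything
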
diff --git a/test/lib/ansible_test/_internal/classification/common.py b/test/lib/ansible_test/_internal/classification/common.py
new file mode 100644
index 0000000..a999b6e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/classification/common.py
@@ -0,0 +1,26 @@
+"""Common classification code used by multiple languages."""
+from __future__ import annotations
+
+import os
+
+from ..data import (
+ data_context,
+)
+
+
+def resolve_csharp_ps_util(import_name: str, path: str) -> str:
+ """Return the fully qualified name of the given import if possible, otherwise return the original import name."""
+ if data_context().content.is_ansible or not import_name.startswith('.'):
+ # We don't support relative paths for builtin utils, there's no point.
+ return import_name
+
+ packages = import_name.split('.')
+ module_packages = path.split(os.path.sep)
+
+ for package in packages:
+ if not module_packages or package:
+ break
+ del module_packages[-1]
+
+ return 'ansible_collections.%s%s' % (data_context().content.prefix,
+ '.'.join(module_packages + [p for p in packages if p]))
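
The dot-popping loop is easiest to follow with concrete values. Below is a standalone trace of the same algorithm, minus the data_context lookups; the collection prefix and file path are invented for the example:

    import os

    def resolve_relative(import_name: str, path: str, prefix: str) -> str:
        """Same algorithm as resolve_csharp_ps_util above, with the prefix passed in."""
        if not import_name.startswith('.'):
            return import_name
        packages = import_name.split('.')
        module_packages = path.split(os.path.sep)
        for package in packages:
            if not module_packages or package:
                break
            del module_packages[-1]  # each leading dot consumes one path component
        return 'ansible_collections.%s%s' % (prefix, '.'.join(module_packages + [p for p in packages if p]))

    # The first dot drops the filename, the second pops 'modules'.
    print(resolve_relative('..module_utils.Tool', 'plugins/modules/demo.ps1', 'ns.col.'))
    # -> ansible_collections.ns.col.plugins.module_utils.Tool
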
diff --git a/test/lib/ansible_test/_internal/classification/csharp.py b/test/lib/ansible_test/_internal/classification/csharp.py
new file mode 100644
index 0000000..edd4101
--- /dev/null
+++ b/test/lib/ansible_test/_internal/classification/csharp.py
@@ -0,0 +1,97 @@
+"""Analyze C# import statements."""
+from __future__ import annotations
+
+import os
+import re
+
+from ..io import (
+ open_text_file,
+)
+
+from ..util import (
+ display,
+)
+
+from .common import (
+ resolve_csharp_ps_util,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+
+def get_csharp_module_utils_imports(powershell_targets: list[TestTarget], csharp_targets: list[TestTarget]) -> dict[str, set[str]]:
+ """Return a dictionary of module_utils names mapped to sets of powershell file paths."""
+ module_utils = enumerate_module_utils()
+
+ imports_by_target_path = {}
+
+ for target in powershell_targets:
+ imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, False)
+
+ for target in csharp_targets:
+ imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, True)
+
+ imports: dict[str, set[str]] = {module_util: set() for module_util in module_utils}
+
+ for target_path, modules in imports_by_target_path.items():
+ for module_util in modules:
+ imports[module_util].add(target_path)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_csharp_module_utils_name(path: str) -> str:
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_csharp_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
+ else:
+ prefix = ''
+
+ name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils() -> set[str]:
+ """Return a set of available module_utils imports."""
+ return {get_csharp_module_utils_name(p)
+ for p in data_context().content.walk_files(data_context().content.module_utils_csharp_path)
+ if os.path.splitext(p)[1] == '.cs'}
+
+
+def extract_csharp_module_utils_imports(path: str, module_utils: set[str], is_pure_csharp: bool) -> set[str]:
+ """Return a set of module_utils imports found in the specified source file."""
+ imports = set()
+ if is_pure_csharp:
+ pattern = re.compile(r'(?i)^using\s+((?:Ansible|AnsibleCollections)\..+);$')
+ else:
+ pattern = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible_collections|\.)\..+)')
+
+ with open_text_file(path) as module_file:
+ for line_number, line in enumerate(module_file, 1):
+ match = re.search(pattern, line)
+
+ if not match:
+ continue
+
+ import_name = resolve_csharp_ps_util(match.group(1), path)
+
+ if import_name in module_utils:
+ imports.add(import_name)
+ elif data_context().content.is_ansible or \
+ import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
+ display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
+
+ return imports
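
Both patterns are anchored line matchers, so they can be sanity-checked in isolation. A quick demonstration against two made-up source lines:

    import re

    pure_cs = re.compile(r'(?i)^using\s+((?:Ansible|AnsibleCollections)\..+);$')
    requires = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible_collections|\.)\..+)')

    print(pure_cs.search('using Ansible.Process;').group(1))
    # -> Ansible.Process
    print(requires.search('#AnsibleRequires -CSharpUtil Ansible.Basic').group(1))
    # -> Ansible.Basic
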
diff --git a/test/lib/ansible_test/_internal/classification/powershell.py b/test/lib/ansible_test/_internal/classification/powershell.py
new file mode 100644
index 0000000..29be6d4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/classification/powershell.py
@@ -0,0 +1,94 @@
+"""Analyze powershell import statements."""
+from __future__ import annotations
+
+import os
+import re
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ display,
+)
+
+from .common import (
+ resolve_csharp_ps_util,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+
+def get_powershell_module_utils_imports(powershell_targets: list[TestTarget]) -> dict[str, set[str]]:
+ """Return a dictionary of module_utils names mapped to sets of powershell file paths."""
+ module_utils = enumerate_module_utils()
+
+ imports_by_target_path = {}
+
+ for target in powershell_targets:
+ imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
+
+ imports: dict[str, set[str]] = {module_util: set() for module_util in module_utils}
+
+ for target_path, modules in imports_by_target_path.items():
+ for module_util in modules:
+ imports[module_util].add(target_path)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_powershell_module_utils_name(path: str) -> str:
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_powershell_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
+ else:
+ prefix = ''
+
+ name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils() -> set[str]:
+ """Return a set of available module_utils imports."""
+ return {get_powershell_module_utils_name(p)
+ for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path)
+ if os.path.splitext(p)[1] == '.psm1'}
+
+
+def extract_powershell_module_utils_imports(path: str, module_utils: set[str]) -> set[str]:
+ """Return a set of module_utils imports found in the specified source file."""
+ imports = set()
+
+ code = read_text_file(path)
+
+ if data_context().content.is_ansible and '# POWERSHELL_COMMON' in code:
+ imports.add('Ansible.ModuleUtils.Legacy')
+
+ for line_number, line in enumerate(code.splitlines(), 1):
+ match = re.search(r'(?i)^#\s*(?:requires\s+-modules?|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line)
+
+ if not match:
+ continue
+
+ import_name = resolve_csharp_ps_util(match.group(1), path)
+
+ if import_name in module_utils:
+ imports.add(import_name)
+ elif data_context().content.is_ansible or \
+ import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
+ display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
+
+ return imports
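
The per-file import sets are then inverted into a module_util-to-dependents map, which is what get_powershell_module_utils_usage consumes. A reduced model of that inversion, with invented file and util names:

    imports_by_target_path = {
        'plugins/modules/a.ps1': {'Ansible.ModuleUtils.Legacy'},
        'plugins/modules/b.ps1': {'Ansible.ModuleUtils.Legacy', 'Ansible.ModuleUtils.ArgvParser'},
    }
    module_utils = {'Ansible.ModuleUtils.Legacy', 'Ansible.ModuleUtils.ArgvParser', 'Ansible.ModuleUtils.Unused'}

    imports = {module_util: set() for module_util in module_utils}

    for target_path, modules in imports_by_target_path.items():
        for module_util in modules:
            imports[module_util].add(target_path)

    print(sorted(imports['Ansible.ModuleUtils.Legacy']))  # both module files
    print(imports['Ansible.ModuleUtils.Unused'])          # set() -> triggers the warning above
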
diff --git a/test/lib/ansible_test/_internal/classification/python.py b/test/lib/ansible_test/_internal/classification/python.py
new file mode 100644
index 0000000..77ffeac
--- /dev/null
+++ b/test/lib/ansible_test/_internal/classification/python.py
@@ -0,0 +1,341 @@
+"""Analyze python import statements."""
+from __future__ import annotations
+
+import ast
+import os
+import re
+import typing as t
+
+from ..io import (
+ read_binary_file,
+)
+
+from ..util import (
+ display,
+ ApplicationError,
+ is_subdir,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+VIRTUAL_PACKAGES = {
+ 'ansible.module_utils.six',
+}
+
+
+def get_python_module_utils_imports(compile_targets: list[TestTarget]) -> dict[str, set[str]]:
+ """Return a dictionary of module_utils names mapped to sets of python file paths."""
+ module_utils = enumerate_module_utils()
+
+ virtual_utils = {m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES)}
+ module_utils -= virtual_utils
+
+ imports_by_target_path = {}
+
+ for target in compile_targets:
+ imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
+
+ def recurse_import(import_name: str, depth: int = 0, seen: t.Optional[set[str]] = None) -> set[str]:
+ """Recursively expand module_utils imports from module_utils files."""
+ display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
+
+ if seen is None:
+ seen = {import_name}
+
+ results = {import_name}
+
+ # virtual packages depend on the modules they contain instead of the reverse
+ if import_name in VIRTUAL_PACKAGES:
+ for sub_import in sorted(virtual_utils):
+ if sub_import.startswith('%s.' % import_name):
+ if sub_import in seen:
+ continue
+
+ seen.add(sub_import)
+
+ matches = sorted(recurse_import(sub_import, depth + 1, seen))
+
+ for result in matches:
+ results.add(result)
+
+ import_path = get_import_path(import_name)
+
+ if import_path not in imports_by_target_path:
+ import_path = get_import_path(import_name, package=True)
+
+ if import_path not in imports_by_target_path:
+ raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
+
+ # process imports in reverse so the deepest imports come first
+ for name in sorted(imports_by_target_path[import_path], reverse=True):
+ if name in virtual_utils:
+ continue
+
+ if name in seen:
+ continue
+
+ seen.add(name)
+
+ matches = sorted(recurse_import(name, depth + 1, seen))
+
+ for result in matches:
+ results.add(result)
+
+ return results
+
+ for module_util in module_utils:
+ # recurse over module_utils imports while excluding self
+ module_util_imports = recurse_import(module_util)
+ module_util_imports.remove(module_util)
+
+ # add recursive imports to all path entries which import this module_util
+ for target_path, modules in imports_by_target_path.items():
+ if module_util in modules:
+ for module_util_import in sorted(module_util_imports):
+ if module_util_import not in modules:
+ display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
+ modules.add(module_util_import)
+
+ imports: dict[str, set[str]] = {module_util: set() for module_util in module_utils | virtual_utils}
+
+ for target_path, modules in imports_by_target_path.items():
+ for module_util in modules:
+ imports[module_util].add(target_path)
+
+ # for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
+ for virtual_util in virtual_utils:
+ parent_package = '.'.join(virtual_util.split('.')[:-1])
+ imports[virtual_util] = imports[parent_package]
+ display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ package_path = get_import_path(module_util, package=True)
+
+ if os.path.exists(package_path) and not os.path.getsize(package_path):
+ continue # ignore empty __init__.py files
+
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_python_module_utils_name(path: str) -> str:
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils'
+ else:
+ prefix = 'ansible.module_utils'
+
+ if path.endswith('/__init__.py'):
+ path = os.path.dirname(path)
+
+ if path == base_path:
+ name = prefix
+ else:
+ name = prefix + '.' + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils() -> set[str]:
+ """Return a list of available module_utils imports."""
+ module_utils = []
+
+ for path in data_context().content.walk_files(data_context().content.module_utils_path):
+ ext = os.path.splitext(path)[1]
+
+ if ext != '.py':
+ continue
+
+ module_utils.append(get_python_module_utils_name(path))
+
+ return set(module_utils)
+
+
+def extract_python_module_utils_imports(path: str, module_utils: set[str]) -> set[str]:
+ """Return a list of module_utils imports found in the specified source file."""
+ # Python code must be read as bytes to avoid a SyntaxError when the source uses comments to declare the file encoding.
+ # See: https://www.python.org/dev/peps/pep-0263
+ # Specifically: If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised.
+ code = read_binary_file(path)
+
+ try:
+ tree = ast.parse(code)
+ except SyntaxError as ex:
+ # Treat this error as a warning so tests can be executed as best as possible.
+ # The compile test will detect and report this syntax error.
+ display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
+ return set()
+
+ finder = ModuleUtilFinder(path, module_utils)
+ finder.visit(tree)
+ return finder.imports
+
+
+def get_import_path(name: str, package: bool = False) -> str:
+ """Return a path from an import name."""
+ if package:
+ filename = os.path.join(name.replace('.', '/'), '__init__.py')
+ else:
+ filename = '%s.py' % name.replace('.', '/')
+
+ if name.startswith('ansible.module_utils.') or name == 'ansible.module_utils':
+ path = os.path.join('lib', filename)
+ elif data_context().content.collection and (
+ name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name) or
+ name == 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name):
+ path = '/'.join(filename.split('/')[3:])
+ else:
+ raise Exception('Unexpected import name: %s' % name)
+
+ return path
+
+
+def path_to_module(path: str) -> str:
+ """Convert the given path to a module name."""
+ module = os.path.splitext(path)[0].replace(os.path.sep, '.')
+
+ if module.endswith('.__init__'):
+ module = module[:-9]
+
+ return module
+
+
+def relative_to_absolute(name: str, level: int, module: str, path: str, lineno: int) -> str:
+ """Convert a relative import to an absolute import."""
+ if level <= 0:
+ absolute_name = name
+ elif not module:
+ display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno))
+ absolute_name = 'relative.nomodule'
+ else:
+ parts = module.split('.')
+
+ if level >= len(parts):
+ display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno))
+ absolute_name = 'relative.abovelevel'
+ else:
+ absolute_name = '.'.join(parts[:-level] + [name])
+
+ return absolute_name
+
+
+class ModuleUtilFinder(ast.NodeVisitor):
+ """AST visitor to find valid module_utils imports."""
+ def __init__(self, path: str, module_utils: set[str]) -> None:
+ self.path = path
+ self.module_utils = module_utils
+ self.imports: set[str] = set()
+
+ # implicitly import parent package
+
+ if path.endswith('/__init__.py'):
+ path = os.path.split(path)[0]
+
+ if path.startswith('lib/ansible/module_utils/'):
+ package = os.path.split(path)[0].replace('/', '.')[4:]
+
+ if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
+ self.add_import(package, 0)
+
+ self.module = None
+
+ if data_context().content.is_ansible:
+ # Various parts of the Ansible source tree execute within different modules.
+ # To support import analysis, each file which uses relative imports must reside under a path defined here.
+ # The mapping is a tuple consisting of a path pattern to match and a replacement path.
+ # During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
+ path_map = (
+ ('^hacking/build_library/build_ansible/', 'build_ansible/'),
+ ('^lib/ansible/', 'ansible/'),
+ ('^test/lib/ansible_test/_util/controller/sanity/validate-modules/', 'validate_modules/'),
+ ('^test/units/', 'test/units/'),
+ ('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
+ ('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
+ ('^test/integration/targets/.*/library/', 'ansible/modules/'),
+ )
+
+ for pattern, replacement in path_map:
+ if re.search(pattern, self.path):
+ revised_path = re.sub(pattern, replacement, self.path)
+ self.module = path_to_module(revised_path)
+ break
+ else:
+ # This assumes that all files within the collection are executed by Ansible as part of the collection.
+ # While that will usually be true, there are exceptions which will result in this resolution being incorrect.
+ self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
+
+ # pylint: disable=locally-disabled, invalid-name
+ def visit_Import(self, node: ast.Import) -> None:
+ """Visit an import node."""
+ self.generic_visit(node)
+
+ # import ansible.module_utils.MODULE[.MODULE]
+ # import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE]
+ self.add_imports([alias.name for alias in node.names], node.lineno)
+
+ # pylint: disable=locally-disabled, invalid-name
+ def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
+ """Visit an import from node."""
+ self.generic_visit(node)
+
+ if not node.module:
+ return
+
+ module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
+
+ if not module.startswith('ansible'):
+ return
+
+ # from ansible.module_utils import MODULE[, MODULE]
+ # from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
+ # from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
+ # from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
+ self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
+
+ def add_import(self, name: str, line_number: int) -> None:
+ """Record the specified import."""
+ import_name = name
+
+ while self.is_module_util_name(name):
+ if name in self.module_utils:
+ if name not in self.imports:
+ display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
+ self.imports.add(name)
+
+ return # duplicate imports are ignored
+
+ name = '.'.join(name.split('.')[:-1])
+
+ if is_subdir(self.path, data_context().content.test_path):
+ return # invalid imports in tests are ignored
+
+ # Treat this error as a warning so tests can be executed as best as possible.
+ # This error should be detected by unit or integration tests.
+ display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
+
+ def add_imports(self, names: list[str], line_no: int) -> None:
+ """Add the given import names if they are module_utils imports."""
+ for name in names:
+ if self.is_module_util_name(name):
+ self.add_import(name, line_no)
+
+ @staticmethod
+ def is_module_util_name(name: str) -> bool:
+ """Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
+ if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
+ return True
+
+ if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
+ return True
+
+ return False
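
The visitor pattern above generalizes: ast.parse builds the tree once and NodeVisitor dispatches on node type. A self-contained reduction that only collects absolute imports (the sample source text is made up):

    from __future__ import annotations

    import ast

    SOURCE = 'from ansible.module_utils.basic import AnsibleModule\nimport ansible.module_utils.six\n'

    class ImportScanner(ast.NodeVisitor):
        """Collect absolute module names from import statements."""

        def __init__(self) -> None:
            self.found: set[str] = set()

        def visit_Import(self, node: ast.Import) -> None:  # pylint: disable=invalid-name
            self.found.update(alias.name for alias in node.names)

        def visit_ImportFrom(self, node: ast.ImportFrom) -> None:  # pylint: disable=invalid-name
            if node.module:  # skip bare relative imports in this sketch
                self.found.add(node.module)

    scanner = ImportScanner()
    scanner.visit(ast.parse(SOURCE))
    print(sorted(scanner.found))
    # -> ['ansible.module_utils.basic', 'ansible.module_utils.six']
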
diff --git a/test/lib/ansible_test/_internal/cli/__init__.py b/test/lib/ansible_test/_internal/cli/__init__.py
new file mode 100644
index 0000000..3171639
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/__init__.py
@@ -0,0 +1,63 @@
+"""Command line parsing."""
+from __future__ import annotations
+
+import argparse
+import os
+import sys
+import typing as t
+
+from .argparsing import (
+ CompositeActionCompletionFinder,
+)
+
+from .commands import (
+ do_commands,
+)
+
+from .epilog import (
+ get_epilog,
+)
+
+from .compat import (
+ HostSettings,
+ convert_legacy_args,
+)
+
+from ..util import (
+ get_ansible_version,
+)
+
+
+def parse_args(argv: t.Optional[list[str]] = None) -> argparse.Namespace:
+ """Parse command line arguments."""
+ completer = CompositeActionCompletionFinder()
+
+ parser = argparse.ArgumentParser(prog='ansible-test', epilog=get_epilog(completer), formatter_class=argparse.RawDescriptionHelpFormatter)
+ parser.add_argument('--version', action='version', version=f'%(prog)s version {get_ansible_version()}')
+
+ do_commands(parser, completer)
+
+ completer(
+ parser,
+ always_complete_options=False,
+ )
+
+ if argv is None:
+ argv = sys.argv[1:]
+ else:
+ argv = argv[1:]
+
+ args = parser.parse_args(argv)
+
+ if args.explain and not args.verbosity:
+ args.verbosity = 1
+
+ if args.no_environment:
+ pass
+ elif args.host_path:
+ args.host_settings = HostSettings.deserialize(os.path.join(args.host_path, 'settings.dat'))
+ else:
+ args.host_settings = convert_legacy_args(argv, args, args.target_mode)
+ args.host_settings.apply_defaults()
+
+ return args
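
The post-parse fix-ups (deriving verbosity from --explain, attaching host settings) happen on the namespace after argparse returns, rather than inside the parser. A trimmed sketch of that shape, with hypothetical flags:

    from __future__ import annotations

    import argparse
    import typing as t

    def parse_args(argv: t.Optional[list[str]] = None) -> argparse.Namespace:
        parser = argparse.ArgumentParser(prog='demo-test')
        parser.add_argument('--explain', action='store_true')
        parser.add_argument('-v', '--verbosity', action='count', default=0)

        args = parser.parse_args(argv)

        if args.explain and not args.verbosity:
            args.verbosity = 1  # --explain implies at least some output

        return args

    print(parse_args(['--explain']).verbosity)  # -> 1
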
diff --git a/test/lib/ansible_test/_internal/cli/actions.py b/test/lib/ansible_test/_internal/cli/actions.py
new file mode 100644
index 0000000..3359a84
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/actions.py
@@ -0,0 +1,90 @@
+"""Actions for handling composite arguments with argparse."""
+from __future__ import annotations
+
+from .argparsing import (
+ CompositeAction,
+ NamespaceParser,
+)
+
+from .parsers import (
+ DelegatedControllerParser,
+ NetworkSshTargetParser,
+ NetworkTargetParser,
+ OriginControllerParser,
+ PosixSshTargetParser,
+ PosixTargetParser,
+ SanityPythonTargetParser,
+ UnitsPythonTargetParser,
+ WindowsSshTargetParser,
+ WindowsTargetParser,
+)
+
+
+class OriginControllerAction(CompositeAction):
+ """Composite action parser for the controller when the only option is `origin`."""
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+ return OriginControllerParser()
+
+
+class DelegatedControllerAction(CompositeAction):
+ """Composite action parser for the controller when delegation is supported."""
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+ return DelegatedControllerParser()
+
+
+class PosixTargetAction(CompositeAction):
+ """Composite action parser for a POSIX target."""
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+ return PosixTargetParser()
+
+
+class WindowsTargetAction(CompositeAction):
+ """Composite action parser for a Windows target."""
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+ return WindowsTargetParser()
+
+
+class NetworkTargetAction(CompositeAction):
+ """Composite action parser for a network target."""
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+ return NetworkTargetParser()
+
+
+class SanityPythonTargetAction(CompositeAction):
+ """Composite action parser for a sanity target."""
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+ return SanityPythonTargetParser()
+
+
+class UnitsPythonTargetAction(CompositeAction):
+ """Composite action parser for a units target."""
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+ return UnitsPythonTargetParser()
+
+
+class PosixSshTargetAction(CompositeAction):
+ """Composite action parser for a POSIX SSH target."""
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+ return PosixSshTargetParser()
+
+
+class WindowsSshTargetAction(CompositeAction):
+ """Composite action parser for a Windows SSH target."""
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+ return WindowsSshTargetParser()
+
+
+class NetworkSshTargetAction(CompositeAction):
+ """Composite action parser for a network SSH target."""
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+ return NetworkSshTargetParser()
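
Each action above only differs in which parser object it creates; the action/parser split keeps argparse wiring separate from composite-value decoding. A toy model of the same delegation pattern, with an invented 'key=value' parser:

    from __future__ import annotations

    import argparse

    class KeyValueParser:
        """Toy namespace parser: decode 'key=value' into the namespace."""
        dest = 'option'

        def parse(self, value: str, namespace: argparse.Namespace) -> None:
            key, _, val = value.partition('=')
            setattr(namespace, self.dest, {key: val})

    class KeyValueAction(argparse.Action):
        """Action which delegates parsing to the parser object it creates."""
        def __init__(self, *args, **kwargs):
            self.definition = KeyValueParser()
            kwargs.update(dest=self.definition.dest)
            super().__init__(*args, **kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            self.definition.parse(values, namespace)

    parser = argparse.ArgumentParser()
    parser.add_argument('--opt', action=KeyValueAction)
    print(parser.parse_args(['--opt', 'color=red']).option)  # -> {'color': 'red'}
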
diff --git a/test/lib/ansible_test/_internal/cli/argparsing/__init__.py b/test/lib/ansible_test/_internal/cli/argparsing/__init__.py
new file mode 100644
index 0000000..540cf55
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/argparsing/__init__.py
@@ -0,0 +1,265 @@
+"""Completion finder which brings together custom options and completion logic."""
+from __future__ import annotations
+
+import abc
+import argparse
+import os
+import re
+import typing as t
+
+from .argcompletion import (
+ OptionCompletionFinder,
+ get_comp_type,
+ register_safe_action,
+ warn,
+)
+
+from .parsers import (
+ Completion,
+ CompletionError,
+ CompletionSuccess,
+ CompletionUnavailable,
+ DocumentationState,
+ NamespaceParser,
+ Parser,
+ ParserError,
+ ParserMode,
+ ParserState,
+)
+
+
+class RegisteredCompletionFinder(OptionCompletionFinder):
+ """
+ Custom option completion finder for argcomplete which allows completion results to be registered.
+ These registered completions, if provided, are used to filter the final completion results.
+ This works around a known bug: https://github.com/kislyuk/argcomplete/issues/221
+ """
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, **kwargs)
+
+ self.registered_completions: t.Optional[list[str]] = None
+
+ def completer(
+ self,
+ prefix: str,
+ action: argparse.Action,
+ parsed_args: argparse.Namespace,
+ **kwargs,
+ ) -> list[str]:
+ """
+ Return a list of completions for the specified prefix and action.
+ Use this as the completer function for argcomplete.
+ """
+ kwargs.clear()
+ del kwargs
+
+ completions = self.get_completions(prefix, action, parsed_args)
+
+ if action.nargs and not isinstance(action.nargs, int):
+ # prevent argcomplete from including unrelated arguments in the completion results
+ self.registered_completions = completions
+
+ return completions
+
+ @abc.abstractmethod
+ def get_completions(
+ self,
+ prefix: str,
+ action: argparse.Action,
+ parsed_args: argparse.Namespace,
+ ) -> list[str]:
+ """
+ Return a list of completions for the specified prefix and action.
+ Called by the complete function.
+ """
+
+ def quote_completions(self, completions, cword_prequote, last_wordbreak_pos):
+ """Modify completion results before returning them."""
+ if self.registered_completions is not None:
+ # If one of the completion handlers registered their results, only allow those exact results to be returned.
+ # This prevents argcomplete from adding results from other completers when they are known to be invalid.
+ allowed_completions = set(self.registered_completions)
+ completions = [completion for completion in completions if completion in allowed_completions]
+
+ return super().quote_completions(completions, cword_prequote, last_wordbreak_pos)
+
+
+class CompositeAction(argparse.Action, metaclass=abc.ABCMeta):
+ """Base class for actions that parse composite arguments."""
+ documentation_state: dict[t.Type[CompositeAction], DocumentationState] = {}
+
+ def __init__(
+ self,
+ *args,
+ **kwargs,
+ ):
+ self.definition = self.create_parser()
+ self.documentation_state[type(self)] = documentation_state = DocumentationState()
+ self.definition.document(documentation_state)
+
+ kwargs.update(dest=self.definition.dest)
+
+ super().__init__(*args, **kwargs)
+
+ register_safe_action(type(self))
+
+ @abc.abstractmethod
+ def create_parser(self) -> NamespaceParser:
+ """Return a namespace parser to parse the argument associated with this action."""
+
+ def __call__(
+ self,
+ parser,
+ namespace,
+ values,
+ option_string=None,
+ ):
+ state = ParserState(mode=ParserMode.PARSE, namespaces=[namespace], remainder=values)
+
+ try:
+ self.definition.parse(state)
+ except ParserError as ex:
+ error = str(ex)
+ except CompletionError as ex:
+ error = ex.message
+ else:
+ return
+
+ if get_comp_type():
+ # FUTURE: It may be possible to enhance error handling by surfacing this error message during downstream completion.
+ return # ignore parse errors during completion to avoid breaking downstream completion
+
+ raise argparse.ArgumentError(self, error)
+
+
+class CompositeActionCompletionFinder(RegisteredCompletionFinder):
+ """Completion finder with support for composite argument parsing."""
+ def get_completions(
+ self,
+ prefix: str,
+ action: argparse.Action,
+ parsed_args: argparse.Namespace,
+ ) -> list[str]:
+ """Return a list of completions appropriate for the given prefix and action, taking into account the arguments that have already been parsed."""
+ assert isinstance(action, CompositeAction)
+
+ state = ParserState(
+ mode=ParserMode.LIST if self.list_mode else ParserMode.COMPLETE,
+ remainder=prefix,
+ namespaces=[parsed_args],
+ )
+
+ answer = complete(action.definition, state)
+
+ completions = []
+
+ if isinstance(answer, CompletionSuccess):
+ self.disable_completion_mangling = answer.preserve
+ completions = answer.completions
+
+ if isinstance(answer, CompletionError):
+ warn(answer.message)
+
+ return completions
+
+
+def detect_file_listing(value: str, mode: ParserMode) -> bool:
+ """
+ Return True if Bash will show a file listing and redraw the prompt, otherwise return False.
+
+ If there are no list results, a file listing will be shown if the value after the last `=` or `:` character:
+
+ - is empty
+ - matches a full path
+ - matches a partial path
+
+ Otherwise Bash will play the bell sound and display nothing.
+
+ see: https://github.com/kislyuk/argcomplete/issues/328
+ see: https://github.com/kislyuk/argcomplete/pull/284
+ """
+ listing = False
+
+ if mode == ParserMode.LIST:
+ right = re.split('[=:]', value)[-1]
+ listing = not right or os.path.exists(right)
+
+ if not listing:
+ directory = os.path.dirname(right)
+
+ # noinspection PyBroadException
+ try:
+ filenames = os.listdir(directory or '.')
+ except Exception: # pylint: disable=broad-except
+ pass
+ else:
+ listing = any(filename.startswith(right) for filename in filenames)
+
+ return listing
+
+
+def detect_false_file_completion(value: str, mode: ParserMode) -> bool:
+ """
+ Return True if Bash will provide an incorrect file completion, otherwise return False.
+
+ If there are no completion results, a filename will be automatically completed if the value after the last `=` or `:` character:
+
+ - matches exactly one partial path
+
+ Otherwise Bash will play the bell sound and display nothing.
+
+ see: https://github.com/kislyuk/argcomplete/issues/328
+ see: https://github.com/kislyuk/argcomplete/pull/284
+ """
+ completion = False
+
+ if mode == ParserMode.COMPLETE:
+ completion = True
+
+ right = re.split('[=:]', value)[-1]
+ directory, prefix = os.path.split(right)
+
+ # noinspection PyBroadException
+ try:
+ filenames = os.listdir(directory or '.')
+ except Exception: # pylint: disable=broad-except
+ pass
+ else:
+ matches = [filename for filename in filenames if filename.startswith(prefix)]
+ completion = len(matches) == 1
+
+ return completion
+
+
+def complete(
+ completer: Parser,
+ state: ParserState,
+) -> Completion:
+ """Perform argument completion using the given completer and return the completion result."""
+ value = state.remainder
+
+ answer: Completion
+
+ try:
+ completer.parse(state)
+ raise ParserError('completion expected')
+ except CompletionUnavailable as ex:
+ if detect_file_listing(value, state.mode):
+ # Displaying a warning before the file listing informs the user it is invalid. Bash will redraw the prompt after the list.
+ # If the file listing is not shown, a warning could be helpful, but would introduce noise on the terminal since the prompt is not redrawn.
+ answer = CompletionError(ex.message)
+ elif detect_false_file_completion(value, state.mode):
+ # When the current prefix provides no matches, but matches a single file on disk, Bash will perform an incorrect completion.
+ # Returning multiple invalid matches instead of no matches will prevent Bash from using its own completion logic in this case.
+ answer = CompletionSuccess(
+ list_mode=True, # abuse list mode to enable preservation of the literal results
+ consumed='',
+ continuation='',
+ matches=['completion', 'invalid']
+ )
+ else:
+ answer = ex
+ except Completion as ex:
+ answer = ex
+
+ return answer
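
Because completion results are exceptions (see the parsers module below), a completer anywhere in the parse can unwind the stack by raising, and complete() simply catches. A toy version of that control flow, with invented choices:

    from __future__ import annotations

    class Completion(Exception):
        """Base class for completion results, raised rather than returned."""

    class CompletionSuccess(Completion):
        def __init__(self, matches: list[str]) -> None:
            super().__init__()
            self.matches = matches

    def parse(word: str) -> None:
        choices = ['docker', 'local', 'remote']  # sample choices
        matches = [choice for choice in choices if choice.startswith(word)]
        if matches != [word]:
            raise CompletionSuccess(matches)  # abort parsing, surface completions
        # a full parser would continue consuming input here

    def complete(word: str) -> list[str]:
        try:
            parse(word)
        except CompletionSuccess as ex:
            return ex.matches
        return []

    print(complete('do'))  # -> ['docker']
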
diff --git a/test/lib/ansible_test/_internal/cli/argparsing/actions.py b/test/lib/ansible_test/_internal/cli/argparsing/actions.py
new file mode 100644
index 0000000..2bcf982
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/argparsing/actions.py
@@ -0,0 +1,18 @@
+"""Actions for argparse."""
+from __future__ import annotations
+
+import argparse
+import enum
+import typing as t
+
+
+class EnumAction(argparse.Action):
+ """Parse an enum using the lowercase enum names."""
+ def __init__(self, **kwargs: t.Any) -> None:
+ self.enum_type: t.Type[enum.Enum] = kwargs.pop('type', None)
+ kwargs.setdefault('choices', tuple(e.name.lower() for e in self.enum_type))
+ super().__init__(**kwargs)
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ value = self.enum_type[values.upper()]
+ setattr(namespace, self.dest, value)
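
A usage sketch for EnumAction; the Color enum and flag name are invented, and the class body is repeated only so the snippet runs standalone:

    from __future__ import annotations

    import argparse
    import enum
    import typing as t

    class EnumAction(argparse.Action):
        """Copy of the action above: parse an enum using the lowercase enum names."""
        def __init__(self, **kwargs: t.Any) -> None:
            self.enum_type: t.Type[enum.Enum] = kwargs.pop('type', None)
            kwargs.setdefault('choices', tuple(e.name.lower() for e in self.enum_type))
            super().__init__(**kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, self.enum_type[values.upper()])

    class Color(enum.Enum):  # invented enum for the demo
        RED = 1
        GREEN = 2

    parser = argparse.ArgumentParser()
    parser.add_argument('--color', action=EnumAction, type=Color)
    print(parser.parse_args(['--color', 'green']).color)  # -> Color.GREEN
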
diff --git a/test/lib/ansible_test/_internal/cli/argparsing/argcompletion.py b/test/lib/ansible_test/_internal/cli/argparsing/argcompletion.py
new file mode 100644
index 0000000..cf5776d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/argparsing/argcompletion.py
@@ -0,0 +1,124 @@
+"""Wrapper around argcomplete providing bug fixes and additional features."""
+from __future__ import annotations
+
+import argparse
+import enum
+import os
+import typing as t
+
+
+class Substitute:
+ """Substitute for missing class which accepts all arguments."""
+ def __init__(self, *args, **kwargs) -> None:
+ pass
+
+
+try:
+ import argcomplete
+
+ from argcomplete import (
+ CompletionFinder,
+ default_validator,
+ )
+
+ warn = argcomplete.warn # pylint: disable=invalid-name
+except ImportError:
+ argcomplete = None
+
+ CompletionFinder = Substitute
+ default_validator = Substitute # pylint: disable=invalid-name
+ warn = Substitute # pylint: disable=invalid-name
+
+
+class CompType(enum.Enum):
+ """
+ Bash COMP_TYPE argument completion types.
+ For documentation, see: https://www.gnu.org/software/bash/manual/html_node/Bash-Variables.html#index-COMP_005fTYPE
+ """
+ COMPLETION = '\t'
+ """
+ Standard completion, typically triggered by a single tab.
+ """
+ MENU_COMPLETION = '%'
+ """
+ Menu completion, which cycles through each completion instead of showing a list.
+ For help using this feature, see: https://stackoverflow.com/questions/12044574/getting-complete-and-menu-complete-to-work-together
+ """
+ LIST = '?'
+ """
+ Standard list, typically triggered by a double tab.
+ """
+ LIST_AMBIGUOUS = '!'
+ """
+ Listing with `show-all-if-ambiguous` set.
+ For documentation, see https://www.gnu.org/software/bash/manual/html_node/Readline-Init-File-Syntax.html#index-show_002dall_002dif_002dambiguous
+ For additional details, see: https://unix.stackexchange.com/questions/614123/explanation-of-bash-completion-comp-type
+ """
+ LIST_UNMODIFIED = '@'
+ """
+ Listing with `show-all-if-unmodified` set.
+ For documentation, see https://www.gnu.org/software/bash/manual/html_node/Readline-Init-File-Syntax.html#index-show_002dall_002dif_002dunmodified
+ For additional details, see: https://unix.stackexchange.com/questions/614123/explanation-of-bash-completion-comp-type
+ """
+
+ @property
+ def list_mode(self) -> bool:
+ """True if completion is running in list mode, otherwise False."""
+ return self in (CompType.LIST, CompType.LIST_AMBIGUOUS, CompType.LIST_UNMODIFIED)
+
+
+def register_safe_action(action_type: t.Type[argparse.Action]) -> None:
+ """Register the given action as a safe action for argcomplete to use during completion if it is not already registered."""
+ if argcomplete and action_type not in argcomplete.safe_actions:
+ argcomplete.safe_actions += (action_type,)
+
+
+def get_comp_type() -> t.Optional[CompType]:
+ """Parse the COMP_TYPE environment variable (if present) and return the associated CompType enum value."""
+ value = os.environ.get('COMP_TYPE')
+ comp_type = CompType(chr(int(value))) if value else None
+ return comp_type
+
+
+class OptionCompletionFinder(CompletionFinder):
+ """
+ Custom completion finder for argcomplete.
+ It provides support for running completion in list mode, which argcomplete natively handles the same as standard completion.
+ """
+ enabled = bool(argcomplete)
+
+ def __init__(self, *args, validator=None, **kwargs) -> None:
+ if validator:
+ raise ValueError()
+
+ self.comp_type = get_comp_type()
+ self.list_mode = self.comp_type.list_mode if self.comp_type else False
+ self.disable_completion_mangling = False
+
+ finder = self
+
+ def custom_validator(completion, prefix):
+ """Completion validator used to optionally bypass validation."""
+ if finder.disable_completion_mangling:
+ return True
+
+ return default_validator(completion, prefix)
+
+ super().__init__(
+ *args,
+ validator=custom_validator,
+ **kwargs,
+ )
+
+ def __call__(self, *args, **kwargs):
+ if self.enabled:
+ super().__call__(*args, **kwargs)
+
+ def quote_completions(self, completions, cword_prequote, last_wordbreak_pos):
+ """Intercept default quoting behavior to optionally block mangling of completion entries."""
+ if self.disable_completion_mangling:
+ # Word breaks have already been handled when generating completions, don't mangle them further.
+ # This is needed in many cases when returning completion lists which lack the existing completion prefix.
+ last_wordbreak_pos = None
+
+ return super().quote_completions(completions, cword_prequote, last_wordbreak_pos)
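
The Substitute fallback keeps the module importable when argcomplete is absent, trading completion support for a no-op. The same pattern reduced, with a hypothetical optional dependency standing in for argcomplete:

    from __future__ import annotations

    class Substitute:
        """Stand-in which accepts any arguments when the real class is missing."""
        def __init__(self, *args, **kwargs) -> None:
            pass

    try:
        from fancycomplete import Finder  # hypothetical optional dependency
    except ImportError:
        Finder = Substitute

    finder = Finder(validator=None)  # constructing it is safe either way
    print(type(finder).__name__)     # -> Substitute when fancycomplete is absent
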
diff --git a/test/lib/ansible_test/_internal/cli/argparsing/parsers.py b/test/lib/ansible_test/_internal/cli/argparsing/parsers.py
new file mode 100644
index 0000000..d07e03c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/argparsing/parsers.py
@@ -0,0 +1,597 @@
+"""General purpose composite argument parsing and completion."""
+from __future__ import annotations
+
+import abc
+import collections.abc as c
+import contextlib
+import dataclasses
+import enum
+import os
+import re
+import typing as t
+
+# NOTE: When choosing delimiters, take into account Bash and argcomplete behavior.
+#
+# Recommended characters for assignment and/or continuation: `/` `:` `=`
+#
+# The recommended assignment_character list is due to how argcomplete handles continuation characters.
+# see: https://github.com/kislyuk/argcomplete/blob/5a20d6165fbb4d4d58559378919b05964870cc16/argcomplete/__init__.py#L557-L558
+
+PAIR_DELIMITER = ','
+ASSIGNMENT_DELIMITER = '='
+PATH_DELIMITER = '/'
+
+
+# This class was originally frozen. However, that causes issues when running under Python 3.11.
+# See: https://github.com/python/cpython/issues/99856
+@dataclasses.dataclass
+class Completion(Exception):
+ """Base class for argument completion results."""
+
+
+@dataclasses.dataclass
+class CompletionUnavailable(Completion):
+ """Argument completion unavailable."""
+ message: str = 'No completions available.'
+
+
+@dataclasses.dataclass
+class CompletionError(Completion):
+ """Argument completion error."""
+ message: t.Optional[str] = None
+
+
+@dataclasses.dataclass
+class CompletionSuccess(Completion):
+ """Successful argument completion result."""
+ list_mode: bool
+ consumed: str
+ continuation: str
+ matches: list[str] = dataclasses.field(default_factory=list)
+
+ @property
+ def preserve(self) -> bool:
+ """
+ True if argcomplete should not mangle completion values, otherwise False.
+ Only used when more than one completion exists to avoid overwriting the word undergoing completion.
+ """
+ return len(self.matches) > 1 and self.list_mode
+
+ @property
+ def completions(self) -> list[str]:
+ """List of completion values to return to argcomplete."""
+ completions = self.matches
+ continuation = '' if self.list_mode else self.continuation
+
+ if not self.preserve:
+ # include the existing prefix to avoid rewriting the word undergoing completion
+ completions = [f'{self.consumed}{completion}{continuation}' for completion in completions]
+
+ return completions
+
+
+class ParserMode(enum.Enum):
+ """Mode the parser is operating in."""
+ PARSE = enum.auto()
+ COMPLETE = enum.auto()
+ LIST = enum.auto()
+
+
+class ParserError(Exception):
+ """Base class for all parsing exceptions."""
+
+
+@dataclasses.dataclass
+class ParserBoundary:
+ """Boundary details for parsing composite input."""
+ delimiters: str
+ required: bool
+ match: t.Optional[str] = None
+ ready: bool = True
+
+
+@dataclasses.dataclass
+class ParserState:
+ """State of the composite argument parser."""
+ mode: ParserMode
+ remainder: str = ''
+ consumed: str = ''
+ boundaries: list[ParserBoundary] = dataclasses.field(default_factory=list)
+ namespaces: list[t.Any] = dataclasses.field(default_factory=list)
+ parts: list[str] = dataclasses.field(default_factory=list)
+
+ @property
+ def incomplete(self) -> bool:
+ """True if parsing is incomplete (unparsed input remains), otherwise False."""
+ return self.remainder is not None
+
+ def match(self, value: str, choices: list[str]) -> bool:
+ """Return True if the given value matches the provided choices, taking into account parsing boundaries, otherwise return False."""
+ if self.current_boundary:
+ delimiters, delimiter = self.current_boundary.delimiters, self.current_boundary.match
+ else:
+ delimiters, delimiter = '', None
+
+ for choice in choices:
+ if choice.rstrip(delimiters) == choice:
+ # choice is not delimited
+ if value == choice:
+ return True # value matched
+ else:
+ # choice is delimited
+ if f'{value}{delimiter}' == choice:
+ return True # value and delimiter matched
+
+ return False
+
+ def read(self) -> str:
+ """Read and return the next input segment, taking into account parsing boundaries."""
+ delimiters = "".join(boundary.delimiters for boundary in self.boundaries)
+
+ if delimiters:
+ pattern = '([' + re.escape(delimiters) + '])'
+ regex = re.compile(pattern)
+ parts = regex.split(self.remainder, 1)
+ else:
+ parts = [self.remainder]
+
+ if len(parts) > 1:
+ value, delimiter, remainder = parts
+ else:
+ value, delimiter, remainder = parts[0], None, None
+
+ for boundary in reversed(self.boundaries):
+ if delimiter and delimiter in boundary.delimiters:
+ boundary.match = delimiter
+ self.consumed += value + delimiter
+ break
+
+ boundary.match = None
+ boundary.ready = False
+
+ if boundary.required:
+ break
+
+ self.remainder = remainder
+
+ return value
+
+ @property
+ def root_namespace(self) -> t.Any:
+ """THe root namespace."""
+ return self.namespaces[0]
+
+ @property
+ def current_namespace(self) -> t.Any:
+ """The current namespace."""
+ return self.namespaces[-1]
+
+ @property
+ def current_boundary(self) -> t.Optional[ParserBoundary]:
+ """The current parser boundary, if any, otherwise None."""
+ return self.boundaries[-1] if self.boundaries else None
+
+ def set_namespace(self, namespace: t.Any) -> None:
+ """Set the current namespace."""
+ self.namespaces.append(namespace)
+
+ @contextlib.contextmanager
+ def delimit(self, delimiters: str, required: bool = True) -> c.Iterator[ParserBoundary]:
+ """Context manager for delimiting parsing of input."""
+ boundary = ParserBoundary(delimiters=delimiters, required=required)
+
+ self.boundaries.append(boundary)
+
+ try:
+ yield boundary
+ finally:
+ self.boundaries.pop()
+
+ if boundary.required and not boundary.match:
+ raise ParserError('required delimiter not found, hit up-level delimiter or end of input instead')
+
+
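+# Example (illustrative input): reading one comma-delimited segment.
+#
+#     state = ParserState(mode=ParserMode.PARSE, remainder='ubuntu2204,seccomp=unconfined')
+#
+#     with state.delimit(PAIR_DELIMITER, required=False) as boundary:
+#         value = state.read()
+#
+#     value            # 'ubuntu2204'
+#     boundary.match   # ','
+#     state.remainder  # 'seccomp=unconfined'
+
+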
+@dataclasses.dataclass
+class DocumentationState:
+ """State of the composite argument parser's generated documentation."""
+ sections: dict[str, str] = dataclasses.field(default_factory=dict)
+
+
+class Parser(metaclass=abc.ABCMeta):
+ """Base class for all composite argument parsers."""
+ @abc.abstractmethod
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ raise Exception(f'Undocumented parser: {type(self)}')
+
+
+class MatchConditions(enum.Flag):
+ """Acceptable condition(s) for matching user input to available choices."""
+ CHOICE = enum.auto()
+ """Match any choice."""
+ ANY = enum.auto()
+ """Match any non-empty string."""
+ NOTHING = enum.auto()
+ """Match an empty string which is not followed by a boundary match."""
+
+
+class DynamicChoicesParser(Parser, metaclass=abc.ABCMeta):
+ """Base class for composite argument parsers which use a list of choices that can be generated during completion."""
+ def __init__(self, conditions: MatchConditions = MatchConditions.CHOICE) -> None:
+ self.conditions = conditions
+
+ @abc.abstractmethod
+ def get_choices(self, value: str) -> list[str]:
+ """Return a list of valid choices based on the given input value."""
+
+ def no_completion_match(self, value: str) -> CompletionUnavailable: # pylint: disable=unused-argument
+ """Return an instance of CompletionUnavailable when no match was found for the given value."""
+ return CompletionUnavailable()
+
+ def no_choices_available(self, value: str) -> ParserError: # pylint: disable=unused-argument
+ """Return an instance of ParserError when parsing fails and no choices are available."""
+ return ParserError('No choices available.')
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ value = state.read()
+ choices = self.get_choices(value)
+
+ if state.mode == ParserMode.PARSE or state.incomplete:
+ if self.conditions & MatchConditions.CHOICE and state.match(value, choices):
+ return value
+
+ if self.conditions & MatchConditions.ANY and value:
+ return value
+
+ if self.conditions & MatchConditions.NOTHING and not value and state.current_boundary and not state.current_boundary.match:
+ return value
+
+ if state.mode == ParserMode.PARSE:
+ if choices:
+ raise ParserError(f'"{value}" not in: {", ".join(choices)}')
+
+ raise self.no_choices_available(value)
+
+ raise CompletionUnavailable()
+
+ matches = [choice for choice in choices if choice.startswith(value)]
+
+ if not matches:
+ raise self.no_completion_match(value)
+
+ continuation = state.current_boundary.delimiters if state.current_boundary and state.current_boundary.required else ''
+
+ raise CompletionSuccess(
+ list_mode=state.mode == ParserMode.LIST,
+ consumed=state.consumed,
+ continuation=continuation,
+ matches=matches,
+ )
+
+
+class ChoicesParser(DynamicChoicesParser):
+ """Composite argument parser which relies on a static list of choices."""
+ def __init__(self, choices: list[str], conditions: MatchConditions = MatchConditions.CHOICE) -> None:
+ self.choices = choices
+
+ super().__init__(conditions=conditions)
+
+ def get_choices(self, value: str) -> list[str]:
+ """Return a list of valid choices based on the given input value."""
+ return self.choices
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ return '|'.join(self.choices)
+
+
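+# Example (illustrative): the same parser in parse and completion modes.
+#
+#     ChoicesParser(['red', 'green', 'blue']).parse(ParserState(mode=ParserMode.PARSE, remainder='green'))
+#     # returns 'green'
+#
+#     ChoicesParser(['red', 'green', 'blue']).parse(ParserState(mode=ParserMode.COMPLETE, remainder='gr'))
+#     # raises CompletionSuccess with matches=['green'] for argcomplete to report
+
+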
+class EnumValueChoicesParser(ChoicesParser):
+ """Composite argument parser which relies on a static list of choices derived from the values of an enum."""
+ def __init__(self, enum_type: t.Type[enum.Enum], conditions: MatchConditions = MatchConditions.CHOICE) -> None:
+ self.enum_type = enum_type
+
+ super().__init__(choices=[str(item.value) for item in enum_type], conditions=conditions)
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ value = super().parse(state)
+ return self.enum_type(value)
+
+
+class IntegerParser(DynamicChoicesParser):
+ """Composite argument parser for integers."""
+ PATTERN = re.compile('^[1-9][0-9]*$')
+
+ def __init__(self, maximum: t.Optional[int] = None) -> None:
+ self.maximum = maximum
+
+ super().__init__()
+
+ def get_choices(self, value: str) -> list[str]:
+ """Return a list of valid choices based on the given input value."""
+ if not value:
+ numbers = list(range(1, 10))
+ elif self.PATTERN.search(value):
+ int_prefix = int(value)
+ base = int_prefix * 10
+ numbers = [int_prefix] + [base + i for i in range(0, 10)]
+ else:
+ numbers = []
+
+ # NOTE: the minimum is currently fixed at 1
+
+ if self.maximum is not None:
+ numbers = [n for n in numbers if n <= self.maximum]
+
+ return [str(n) for n in numbers]
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ value = super().parse(state)
+ return int(value)
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ return '{integer}'
+
+
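+# Example (illustrative): digit-at-a-time completion.
+#
+#     IntegerParser().get_choices('')            # ['1', '2', ..., '9']
+#     IntegerParser().get_choices('4')           # ['4', '40', '41', ..., '49']
+#     IntegerParser(maximum=8).get_choices('4')  # ['4']
+
+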
+class BooleanParser(ChoicesParser):
+ """Composite argument parser for boolean (yes/no) values."""
+ def __init__(self) -> None:
+ super().__init__(['yes', 'no'])
+
+ def parse(self, state: ParserState) -> bool:
+ """Parse the input from the given state and return the result."""
+ value = super().parse(state)
+ return value == 'yes'
+
+
+class AnyParser(ChoicesParser):
+ """Composite argument parser which accepts any input value."""
+ def __init__(self, nothing: bool = False, no_match_message: t.Optional[str] = None) -> None:
+ self.no_match_message = no_match_message
+
+ conditions = MatchConditions.ANY
+
+ if nothing:
+ conditions |= MatchConditions.NOTHING
+
+ super().__init__([], conditions=conditions)
+
+ def no_completion_match(self, value: str) -> CompletionUnavailable:
+ """Return an instance of CompletionUnavailable when no match was found for the given value."""
+ if self.no_match_message:
+ return CompletionUnavailable(message=self.no_match_message)
+
+ return super().no_completion_match(value)
+
+ def no_choices_available(self, value: str) -> ParserError:
+ """Return an instance of ParserError when parsing fails and no choices are available."""
+ if self.no_match_message:
+ return ParserError(self.no_match_message)
+
+ return super().no_choices_available(value)
+
+
+class RelativePathNameParser(DynamicChoicesParser):
+ """Composite argument parser for relative path names."""
+ RELATIVE_NAMES = ['.', '..']
+
+ def __init__(self, choices: list[str]) -> None:
+ self.choices = choices
+
+ super().__init__()
+
+ def get_choices(self, value: str) -> list[str]:
+ """Return a list of valid choices based on the given input value."""
+ choices = list(self.choices)
+
+ if value in self.RELATIVE_NAMES:
+ # complete relative names, but avoid suggesting them unless the current name is relative
+ # unfortunately this will be sorted in reverse of what bash presents ("../ ./" instead of "./ ../")
+ choices.extend(f'{item}{PATH_DELIMITER}' for item in self.RELATIVE_NAMES)
+
+ return choices
+
+
+class FileParser(Parser):
+ """Composite argument parser for absolute or relative file paths."""
+ def parse(self, state: ParserState) -> str:
+ """Parse the input from the given state and return the result."""
+ if state.mode == ParserMode.PARSE:
+ path = AnyParser().parse(state)
+
+ if not os.path.isfile(path):
+ raise ParserError(f'Not a file: {path}')
+ else:
+ path = ''
+
+ with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary
+ while boundary.ready:
+ directory = path or '.'
+
+ try:
+ with os.scandir(directory) as scan: # type: c.Iterator[os.DirEntry]
+ choices = [f'{item.name}{PATH_DELIMITER}' if item.is_dir() else item.name for item in scan]
+ except OSError:
+ choices = []
+
+ if not path:
+ choices.append(PATH_DELIMITER) # allow absolute paths
+ choices.append('../') # suggest relative paths
+
+ part = RelativePathNameParser(choices).parse(state)
+ path += f'{part}{boundary.match or ""}'
+
+ return path
+
+
+class AbsolutePathParser(Parser):
+ """Composite argument parser for absolute file paths. Paths are only verified for proper syntax, not for existence."""
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ path = ''
+
+ with state.delimit(PATH_DELIMITER, required=False) as boundary: # type: ParserBoundary
+ while boundary.ready:
+ if path:
+ path += AnyParser(nothing=True).parse(state)
+ else:
+ path += ChoicesParser([PATH_DELIMITER]).parse(state)
+
+ path += (boundary.match or '')
+
+ return path
+
+
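+# Example (illustrative): parsing an absolute path segment by segment.
+#
+#     AbsolutePathParser().parse(ParserState(mode=ParserMode.PARSE, remainder='/tmp/work'))
+#     # returns '/tmp/work' -- the leading '/' is matched by ChoicesParser,
+#     # each remaining segment by AnyParser(nothing=True)
+
+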
+class NamespaceParser(Parser, metaclass=abc.ABCMeta):
+ """Base class for composite argument parsers that store their results in a namespace."""
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ namespace = state.current_namespace
+ current = getattr(namespace, self.dest)
+
+ if current and self.limit_one:
+ if state.mode == ParserMode.PARSE:
+ raise ParserError('Option cannot be specified more than once.')
+
+ raise CompletionError('Option cannot be specified more than once.')
+
+ value = self.get_value(state)
+
+ if self.use_list:
+ if not current:
+ current = []
+ setattr(namespace, self.dest, current)
+
+ current.append(value)
+ else:
+ setattr(namespace, self.dest, value)
+
+ return value
+
+ def get_value(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result, without storing the result in the namespace."""
+ return super().parse(state)
+
+ @property
+ def use_list(self) -> bool:
+ """True if the destination is a list, otherwise False."""
+ return False
+
+ @property
+ def limit_one(self) -> bool:
+ """True if only one target is allowed, otherwise False."""
+ return not self.use_list
+
+ @property
+ @abc.abstractmethod
+ def dest(self) -> str:
+ """The name of the attribute where the value should be stored."""
+
+
+class NamespaceWrappedParser(NamespaceParser):
+ """Composite argument parser that wraps a non-namespace parser and stores the result in a namespace."""
+ def __init__(self, dest: str, parser: Parser) -> None:
+ self._dest = dest
+ self.parser = parser
+
+ def get_value(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result, without storing the result in the namespace."""
+ return self.parser.parse(state)
+
+ @property
+ def dest(self) -> str:
+ """The name of the attribute where the value should be stored."""
+ return self._dest
+
+
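+# Example (illustrative, using types.SimpleNamespace as a stand-in namespace):
+#
+#     state = ParserState(mode=ParserMode.PARSE, remainder='yes')
+#     state.set_namespace(types.SimpleNamespace(verify=None))
+#
+#     NamespaceWrappedParser('verify', BooleanParser()).parse(state)  # returns True
+#     state.current_namespace.verify                                  # True
+
+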
+class KeyValueParser(Parser, metaclass=abc.ABCMeta):
+ """Base class for key/value composite argument parsers."""
+ @abc.abstractmethod
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of key names and value parsers."""
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ namespace = state.current_namespace
+ parsers = self.get_parsers(state)
+ keys = list(parsers)
+
+ with state.delimit(PAIR_DELIMITER, required=False) as pair: # type: ParserBoundary
+ while pair.ready:
+ with state.delimit(ASSIGNMENT_DELIMITER):
+ key = ChoicesParser(keys).parse(state)
+
+ value = parsers[key].parse(state)
+
+ setattr(namespace, key, value)
+
+ keys.remove(key)
+
+ return namespace
+
+
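+# Example (hypothetical subclass): parsing input such as 'python=3.11,seccomp=unconfined'.
+#
+#     class ExampleKeyValueParser(KeyValueParser):
+#         def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+#             return dict(
+#                 python=ChoicesParser(['3.10', '3.11']),
+#                 seccomp=ChoicesParser(['default', 'unconfined']),
+#             )
+#
+# Each key is consumed at most once, its value is parsed by the matching parser, and the
+# result is stored on state.current_namespace (here: python='3.11', seccomp='unconfined').
+
+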
+class PairParser(Parser, metaclass=abc.ABCMeta):
+ """Base class for composite argument parsers consisting of a left and right argument parser, with input separated by a delimiter."""
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ namespace = self.create_namespace()
+
+ state.set_namespace(namespace)
+
+ with state.delimit(self.delimiter, self.required) as boundary: # type: ParserBoundary
+ choice = self.get_left_parser(state).parse(state)
+
+ if boundary.match:
+ self.get_right_parser(choice).parse(state)
+
+ return namespace
+
+ @property
+ def required(self) -> bool:
+ """True if the delimiter (and thus right parser) is required, otherwise False."""
+ return False
+
+ @property
+ def delimiter(self) -> str:
+ """The delimiter to use between the left and right parser."""
+ return PAIR_DELIMITER
+
+ @abc.abstractmethod
+ def create_namespace(self) -> t.Any:
+ """Create and return a namespace."""
+
+ @abc.abstractmethod
+ def get_left_parser(self, state: ParserState) -> Parser:
+ """Return the parser for the left side."""
+
+ @abc.abstractmethod
+ def get_right_parser(self, choice: t.Any) -> Parser:
+ """Return the parser for the right side."""
+
+
+class TypeParser(Parser, metaclass=abc.ABCMeta):
+ """Base class for composite argument parsers which parse a type name, a colon and then parse results based on the type given by the type name."""
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]: # pylint: disable=unused-argument
+ """Return a dictionary of type names and type parsers."""
+ return self.get_stateless_parsers()
+
+ @abc.abstractmethod
+ def get_stateless_parsers(self) -> dict[str, Parser]:
+ """Return a dictionary of type names and type parsers."""
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ parsers = self.get_parsers(state)
+
+ with state.delimit(':'):
+ key = ChoicesParser(list(parsers)).parse(state)
+
+ value = parsers[key].parse(state)
+
+ return value
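+
+
+# Example (hypothetical subclass): input of the form 'TYPE:VALUE'.
+#
+#     class ExampleTypeParser(TypeParser):
+#         def get_stateless_parsers(self) -> dict[str, Parser]:
+#             return dict(
+#                 integer=IntegerParser(),
+#                 boolean=BooleanParser(),
+#             )
+#
+# 'integer:42' selects IntegerParser and returns 42; 'boolean:yes' returns True.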
diff --git a/test/lib/ansible_test/_internal/cli/commands/__init__.py b/test/lib/ansible_test/_internal/cli/commands/__init__.py
new file mode 100644
index 0000000..2eb14ab
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/__init__.py
@@ -0,0 +1,241 @@
+"""Command line parsing for all commands."""
+from __future__ import annotations
+
+import argparse
+import functools
+import sys
+
+from ...util import (
+ display,
+)
+
+from ..completers import (
+ complete_target,
+ register_completer,
+)
+
+from ..environments import (
+ CompositeActionCompletionFinder,
+)
+
+from .coverage import (
+ do_coverage,
+)
+
+from .env import (
+ do_env,
+)
+
+from .integration import (
+ do_integration,
+)
+
+from .sanity import (
+ do_sanity,
+)
+
+from .shell import (
+ do_shell,
+)
+
+from .units import (
+ do_units,
+)
+
+
+def do_commands(
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+) -> None:
+ """Command line parsing for all commands."""
+ common = argparse.ArgumentParser(add_help=False)
+
+ common.add_argument(
+ '-e',
+ '--explain',
+ action='store_true',
+ help='explain commands that would be executed',
+ )
+
+ common.add_argument(
+ '-v',
+ '--verbose',
+ dest='verbosity',
+ action='count',
+ default=0,
+ help='display more output',
+ )
+
+ common.add_argument(
+ '--color',
+ metavar='COLOR',
+ nargs='?',
+ help='generate color output: yes, no, auto',
+ const='yes',
+ default='auto',
+ type=color,
+ )
+
+ common.add_argument(
+ '--debug',
+ action='store_true',
+ help='run ansible commands in debug mode',
+ )
+
+ common.add_argument(
+ '--truncate',
+ dest='truncate',
+ metavar='COLUMNS',
+ type=int,
+ default=display.columns,
+ help='truncate some long output (0=disabled) (default: auto)',
+ )
+
+ common.add_argument(
+ '--redact',
+ dest='redact',
+ action='store_true',
+ default=True,
+ help=argparse.SUPPRESS, # kept for backwards compatibility, but no point in advertising since it's the default
+ )
+
+ common.add_argument(
+ '--no-redact',
+ dest='redact',
+ action='store_false',
+ default=False,
+ help='show sensitive values in output',
+ )
+
+ test = argparse.ArgumentParser(add_help=False, parents=[common])
+
+ testing = test.add_argument_group(title='common testing arguments')
+
+ register_completer(testing.add_argument(
+ 'include',
+ metavar='TARGET',
+ nargs='*',
+ help='test the specified target',
+ ), functools.partial(complete_target, completer))
+
+ register_completer(testing.add_argument(
+ '--include',
+ metavar='TARGET',
+ action='append',
+ help='include the specified target',
+ ), functools.partial(complete_target, completer))
+
+ register_completer(testing.add_argument(
+ '--exclude',
+ metavar='TARGET',
+ action='append',
+ help='exclude the specified target',
+ ), functools.partial(complete_target, completer))
+
+ register_completer(testing.add_argument(
+ '--require',
+ metavar='TARGET',
+ action='append',
+ help='require the specified target',
+ ), functools.partial(complete_target, completer))
+
+ testing.add_argument(
+ '--coverage',
+ action='store_true',
+ help='analyze code coverage when running tests',
+ )
+
+ testing.add_argument(
+ '--coverage-check',
+ action='store_true',
+ help='only verify code coverage can be enabled',
+ )
+
+ testing.add_argument(
+ '--metadata',
+ help=argparse.SUPPRESS,
+ )
+
+ testing.add_argument(
+ '--base-branch',
+ metavar='BRANCH',
+ help='base branch used for change detection',
+ )
+
+ testing.add_argument(
+ '--changed',
+ action='store_true',
+ help='limit targets based on changes',
+ )
+
+ changes = test.add_argument_group(title='change detection arguments')
+
+ changes.add_argument(
+ '--tracked',
+ action='store_true',
+ help=argparse.SUPPRESS,
+ )
+
+ changes.add_argument(
+ '--untracked',
+ action='store_true',
+ help='include untracked files',
+ )
+
+ changes.add_argument(
+ '--ignore-committed',
+ dest='committed',
+ action='store_false',
+ help='exclude committed files',
+ )
+
+ changes.add_argument(
+ '--ignore-staged',
+ dest='staged',
+ action='store_false',
+ help='exclude staged files',
+ )
+
+ changes.add_argument(
+ '--ignore-unstaged',
+ dest='unstaged',
+ action='store_false',
+ help='exclude unstaged files',
+ )
+
+ changes.add_argument(
+ '--changed-from',
+ metavar='PATH',
+ help=argparse.SUPPRESS,
+ )
+
+ changes.add_argument(
+ '--changed-path',
+ metavar='PATH',
+ action='append',
+ help=argparse.SUPPRESS,
+ )
+
+ subparsers = parent.add_subparsers(metavar='COMMAND', required=True)
+
+ do_coverage(subparsers, common, completer)
+ do_env(subparsers, common, completer)
+ do_shell(subparsers, common, completer)
+
+ do_integration(subparsers, test, completer)
+ do_sanity(subparsers, test, completer)
+ do_units(subparsers, test, completer)
+
+
+def color(value: str) -> bool:
+ """Strict converter for color option."""
+ if value == 'yes':
+ return True
+
+ if value == 'no':
+ return False
+
+ if value == 'auto':
+ return sys.stdout.isatty()
+
+ raise argparse.ArgumentTypeError(f"invalid choice: '{value}' (choose from 'yes', 'no', 'auto')")
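+
+
+# Example: the strict mapping performed above.
+#
+#     color('yes')   # True
+#     color('no')    # False
+#     color('auto')  # sys.stdout.isatty()
+#     color('x')     # raises argparse.ArgumentTypeError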
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/__init__.py b/test/lib/ansible_test/_internal/cli/commands/coverage/__init__.py
new file mode 100644
index 0000000..28e6770
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/__init__.py
@@ -0,0 +1,85 @@
+"""Command line parsing for all `coverage` commands."""
+from __future__ import annotations
+
+import argparse
+
+from ....commands.coverage import (
+ COVERAGE_GROUPS,
+)
+
+from ...environments import (
+ CompositeActionCompletionFinder,
+)
+
+from .analyze import (
+ do_analyze,
+)
+
+from .combine import (
+ do_combine,
+)
+
+from .erase import (
+ do_erase,
+)
+
+from .html import (
+ do_html,
+)
+
+from .report import (
+ do_report,
+)
+
+from .xml import (
+ do_xml,
+)
+
+
+def do_coverage(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+) -> None:
+ """Command line parsing for all `coverage` commands."""
+ coverage_common = argparse.ArgumentParser(add_help=False, parents=[parent])
+
+ parser = subparsers.add_parser(
+ 'coverage',
+ help='code coverage management and reporting',
+ )
+
+ coverage_subparsers = parser.add_subparsers(metavar='COMMAND', required=True)
+
+ do_analyze(coverage_subparsers, coverage_common, completer)
+ do_erase(coverage_subparsers, coverage_common, completer)
+
+ do_combine(coverage_subparsers, parent, add_coverage_common, completer)
+ do_report(coverage_subparsers, parent, add_coverage_common, completer)
+ do_html(coverage_subparsers, parent, add_coverage_common, completer)
+ do_xml(coverage_subparsers, parent, add_coverage_common, completer)
+
+
+def add_coverage_common(
+ parser: argparse.ArgumentParser,
+):
+ """Add common coverage arguments."""
+ parser.add_argument(
+ '--group-by',
+ metavar='GROUP',
+ action='append',
+ choices=COVERAGE_GROUPS,
+ help='group output by: %s' % ', '.join(COVERAGE_GROUPS),
+ )
+
+ parser.add_argument(
+ '--all',
+ action='store_true',
+ help='include all python/powershell source files',
+ )
+
+ parser.add_argument(
+ '--stub',
+ action='store_true',
+ help='generate empty report of all python/powershell source files',
+ )
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/__init__.py b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/__init__.py
new file mode 100644
index 0000000..05fbd23
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/__init__.py
@@ -0,0 +1,28 @@
+"""Command line parsing for all `coverage analyze` commands."""
+from __future__ import annotations
+
+import argparse
+
+from .targets import (
+ do_targets,
+)
+
+from ....environments import (
+ CompositeActionCompletionFinder,
+)
+
+
+def do_analyze(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+) -> None:
+ """Command line parsing for all `coverage analyze` commands."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'analyze',
+ help='analyze collected coverage data',
+ )
+
+ analyze_subparsers = parser.add_subparsers(metavar='COMMAND', required=True)
+
+ do_targets(analyze_subparsers, parent, completer)
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/__init__.py b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/__init__.py
new file mode 100644
index 0000000..7b6ea3e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/__init__.py
@@ -0,0 +1,48 @@
+"""Command line parsing for all `coverage analyze targets` commands."""
+from __future__ import annotations
+
+import argparse
+
+from .....environments import (
+ CompositeActionCompletionFinder,
+)
+
+from .combine import (
+ do_combine,
+)
+
+from .expand import (
+ do_expand,
+)
+
+from .filter import (
+ do_filter,
+)
+
+from .generate import (
+ do_generate,
+)
+
+from .missing import (
+ do_missing,
+)
+
+
+def do_targets(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+) -> None:
+ """Command line parsing for all `coverage analyze targets` commands."""
+ targets = subparsers.add_parser(
+ 'targets',
+ help='analyze integration test target coverage',
+ )
+
+ targets_subparsers = targets.add_subparsers(metavar='COMMAND', required=True)
+
+ do_generate(targets_subparsers, parent, completer)
+ do_expand(targets_subparsers, parent, completer)
+ do_filter(targets_subparsers, parent, completer)
+ do_combine(targets_subparsers, parent, completer)
+ do_missing(targets_subparsers, parent, completer)
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/combine.py b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/combine.py
new file mode 100644
index 0000000..7fa49bf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/combine.py
@@ -0,0 +1,49 @@
+"""Command line parsing for the `coverage analyze targets combine` command."""
+from __future__ import annotations
+
+import argparse
+
+from ......commands.coverage.analyze.targets.combine import (
+ command_coverage_analyze_targets_combine,
+ CoverageAnalyzeTargetsCombineConfig,
+)
+
+from .....environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_combine(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `coverage analyze targets combine` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'combine',
+ parents=[parent],
+ help='combine multiple aggregated coverage files',
+ )
+
+ parser.set_defaults(
+ func=command_coverage_analyze_targets_combine,
+ config=CoverageAnalyzeTargetsCombineConfig,
+ )
+
+ targets_combine = parser.add_argument_group('coverage arguments')
+
+ targets_combine.add_argument(
+ 'input_file',
+ nargs='+',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_combine.add_argument(
+ 'output_file',
+ help='output file to write aggregated coverage to',
+ )
+
+ add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets combine
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/expand.py b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/expand.py
new file mode 100644
index 0000000..f5f020f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/expand.py
@@ -0,0 +1,48 @@
+"""Command line parsing for the `coverage analyze targets expand` command."""
+from __future__ import annotations
+
+import argparse
+
+from ......commands.coverage.analyze.targets.expand import (
+ command_coverage_analyze_targets_expand,
+ CoverageAnalyzeTargetsExpandConfig,
+)
+
+from .....environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_expand(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `coverage analyze targets expand` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'expand',
+ parents=[parent],
+ help='expand target names from integers in aggregated coverage',
+ )
+
+ parser.set_defaults(
+ func=command_coverage_analyze_targets_expand,
+ config=CoverageAnalyzeTargetsExpandConfig,
+ )
+
+ targets_expand = parser.add_argument_group(title='coverage arguments')
+
+ targets_expand.add_argument(
+ 'input_file',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_expand.add_argument(
+ 'output_file',
+ help='output file to write expanded coverage to',
+ )
+
+ add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets expand
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/filter.py b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/filter.py
new file mode 100644
index 0000000..afcb828
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/filter.py
@@ -0,0 +1,76 @@
+"""Command line parsing for the `coverage analyze targets filter` command."""
+from __future__ import annotations
+
+import argparse
+
+from ......commands.coverage.analyze.targets.filter import (
+ command_coverage_analyze_targets_filter,
+ CoverageAnalyzeTargetsFilterConfig,
+)
+
+from .....environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_filter(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `coverage analyze targets filter` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'filter',
+ parents=[parent],
+ help='filter aggregated coverage data',
+ )
+
+ parser.set_defaults(
+ func=command_coverage_analyze_targets_filter,
+ config=CoverageAnalyzeTargetsFilterConfig,
+ )
+
+ targets_filter = parser.add_argument_group(title='coverage arguments')
+
+ targets_filter.add_argument(
+ 'input_file',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_filter.add_argument(
+ 'output_file',
+ help='output file to write expanded coverage to',
+ )
+
+ targets_filter.add_argument(
+ '--include-target',
+ metavar='TGT',
+ dest='include_targets',
+ action='append',
+ help='include the specified targets',
+ )
+
+ targets_filter.add_argument(
+ '--exclude-target',
+ metavar='TGT',
+ dest='exclude_targets',
+ action='append',
+ help='exclude the specified targets',
+ )
+
+ targets_filter.add_argument(
+ '--include-path',
+ metavar='REGEX',
+ help='include paths matching the given regex',
+ )
+
+ targets_filter.add_argument(
+ '--exclude-path',
+ metavar='REGEX',
+ help='exclude paths matching the given regex',
+ )
+
+ add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets filter
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/generate.py b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/generate.py
new file mode 100644
index 0000000..0d13933
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/generate.py
@@ -0,0 +1,49 @@
+"""Command line parsing for the `coverage analyze targets generate` command."""
+from __future__ import annotations
+
+import argparse
+
+from ......commands.coverage.analyze.targets.generate import (
+ command_coverage_analyze_targets_generate,
+ CoverageAnalyzeTargetsGenerateConfig,
+)
+
+from .....environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_generate(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `coverage analyze targets generate` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'generate',
+ parents=[parent],
+ help='aggregate coverage by integration test target',
+ )
+
+ parser.set_defaults(
+ func=command_coverage_analyze_targets_generate,
+ config=CoverageAnalyzeTargetsGenerateConfig,
+ )
+
+ targets_generate = parser.add_argument_group(title='coverage arguments')
+
+ targets_generate.add_argument(
+ 'input_dir',
+ nargs='?',
+ help='directory to read coverage from',
+ )
+
+ targets_generate.add_argument(
+ 'output_file',
+ help='output file for aggregated coverage',
+ )
+
+ add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets generate
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/missing.py b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/missing.py
new file mode 100644
index 0000000..8af236f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/analyze/targets/missing.py
@@ -0,0 +1,65 @@
+"""Command line parsing for the `coverage analyze targets missing` command."""
+from __future__ import annotations
+
+import argparse
+
+from ......commands.coverage.analyze.targets.missing import (
+ command_coverage_analyze_targets_missing,
+ CoverageAnalyzeTargetsMissingConfig,
+)
+
+from .....environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_missing(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `coverage analyze targets missing` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'missing',
+ parents=[parent],
+ help='identify coverage in one file missing in another',
+ )
+
+ parser.set_defaults(
+ func=command_coverage_analyze_targets_missing,
+ config=CoverageAnalyzeTargetsMissingConfig,
+ )
+
+ targets_missing = parser.add_argument_group(title='coverage arguments')
+
+ targets_missing.add_argument(
+ 'from_file',
+ help='input file containing aggregated coverage',
+ )
+
+ targets_missing.add_argument(
+ 'to_file',
+ help='input file containing aggregated coverage',
+ )
+
+ targets_missing.add_argument(
+ 'output_file',
+ help='output file to write aggregated coverage to',
+ )
+
+ targets_missing.add_argument(
+ '--only-gaps',
+ action='store_true',
+ help='report only arcs/lines not hit by any target',
+ )
+
+ targets_missing.add_argument(
+ '--only-exists',
+ action='store_true',
+ help='limit results to files that exist',
+ )
+
+ add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets missing
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/combine.py b/test/lib/ansible_test/_internal/cli/commands/coverage/combine.py
new file mode 100644
index 0000000..9b6d34a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/combine.py
@@ -0,0 +1,49 @@
+"""Command line parsing for the `coverage combine` command."""
+from __future__ import annotations
+
+import argparse
+import collections.abc as c
+import typing as t
+
+from ....commands.coverage.combine import (
+ command_coverage_combine,
+ CoverageCombineConfig,
+)
+
+from ...environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_combine(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ add_coverage_common: c.Callable[[argparse.ArgumentParser], None],
+ completer: CompositeActionCompletionFinder,
+) -> None:
+ """Command line parsing for the `coverage combine` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'combine',
+ parents=[parent],
+ help='combine coverage data and rewrite remote paths',
+ )
+
+ parser.set_defaults(
+ func=command_coverage_combine,
+ config=CoverageCombineConfig,
+ )
+
+ coverage_combine = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='coverage arguments'))
+
+ add_coverage_common(coverage_combine)
+
+ coverage_combine.add_argument(
+ '--export',
+ metavar='DIR',
+ help='directory to export combined coverage files to',
+ )
+
+ add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NO_TARGETS) # coverage combine
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/erase.py b/test/lib/ansible_test/_internal/cli/commands/coverage/erase.py
new file mode 100644
index 0000000..ef356f0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/erase.py
@@ -0,0 +1,36 @@
+"""Command line parsing for the `coverage erase` command."""
+from __future__ import annotations
+
+import argparse
+
+from ....commands.coverage.erase import (
+ command_coverage_erase,
+ CoverageEraseConfig,
+)
+
+from ...environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_erase(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+) -> None:
+ """Command line parsing for the `coverage erase` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'erase',
+ parents=[parent],
+ help='erase coverage data files',
+ )
+
+ parser.set_defaults(
+ func=command_coverage_erase,
+ config=CoverageEraseConfig,
+ )
+
+ add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage erase
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/html.py b/test/lib/ansible_test/_internal/cli/commands/coverage/html.py
new file mode 100644
index 0000000..5f719de
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/html.py
@@ -0,0 +1,43 @@
+"""Command line parsing for the `coverage html` command."""
+from __future__ import annotations
+
+import argparse
+import collections.abc as c
+import typing as t
+
+from ....commands.coverage.html import (
+ command_coverage_html,
+ CoverageHtmlConfig,
+)
+
+from ...environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_html(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ add_coverage_common: c.Callable[[argparse.ArgumentParser], None],
+ completer: CompositeActionCompletionFinder,
+) -> None:
+ """Command line parsing for the `coverage html` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'html',
+ parents=[parent],
+ help='generate html coverage report',
+ )
+
+ parser.set_defaults(
+ func=command_coverage_html,
+ config=CoverageHtmlConfig,
+ )
+
+ coverage_html = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='coverage arguments'))
+
+ add_coverage_common(coverage_html)
+
+ add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NO_TARGETS) # coverage html
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/report.py b/test/lib/ansible_test/_internal/cli/commands/coverage/report.py
new file mode 100644
index 0000000..e6a6e80
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/report.py
@@ -0,0 +1,61 @@
+"""Command line parsing for the `coverage report` command."""
+from __future__ import annotations
+
+import argparse
+import collections.abc as c
+import typing as t
+
+from ....commands.coverage.report import (
+ command_coverage_report,
+ CoverageReportConfig,
+)
+
+from ...environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_report(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ add_coverage_common: c.Callable[[argparse.ArgumentParser], None],
+ completer: CompositeActionCompletionFinder,
+) -> None:
+ """Command line parsing for the `coverage report` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'report',
+ parents=[parent],
+ help='generate console coverage report',
+ )
+
+ parser.set_defaults(
+ func=command_coverage_report,
+ config=CoverageReportConfig,
+ )
+
+ coverage_report = t.cast(argparse.ArgumentParser, parser.add_argument_group('coverage arguments'))
+
+ add_coverage_common(coverage_report)
+
+ coverage_report.add_argument(
+ '--show-missing',
+ action='store_true',
+ help='show line numbers of statements not executed',
+ )
+
+ coverage_report.add_argument(
+ '--include',
+ metavar='PAT[,...]',
+ help='only include paths that match a pattern (accepts quoted shell wildcards)',
+ )
+
+ coverage_report.add_argument(
+ '--omit',
+ metavar='PAT[,...]',
+ help='omit paths that match a pattern (accepts quoted shell wildcards)',
+ )
+
+ add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NO_TARGETS) # coverage report
diff --git a/test/lib/ansible_test/_internal/cli/commands/coverage/xml.py b/test/lib/ansible_test/_internal/cli/commands/coverage/xml.py
new file mode 100644
index 0000000..e7b03ca
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/coverage/xml.py
@@ -0,0 +1,43 @@
+"""Command line parsing for the `coverage xml` command."""
+from __future__ import annotations
+
+import argparse
+import collections.abc as c
+import typing as t
+
+from ....commands.coverage.xml import (
+ command_coverage_xml,
+ CoverageXmlConfig,
+)
+
+from ...environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_xml(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ add_coverage_common: c.Callable[[argparse.ArgumentParser], None],
+ completer: CompositeActionCompletionFinder,
+) -> None:
+ """Command line parsing for the `coverage xml` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'xml',
+ parents=[parent],
+ help='generate xml coverage report',
+ )
+
+ parser.set_defaults(
+ func=command_coverage_xml,
+ config=CoverageXmlConfig,
+ )
+
+ coverage_xml = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='coverage arguments'))
+
+ add_coverage_common(coverage_xml)
+
+ add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NO_TARGETS) # coverage xml
diff --git a/test/lib/ansible_test/_internal/cli/commands/env.py b/test/lib/ansible_test/_internal/cli/commands/env.py
new file mode 100644
index 0000000..0cd2114
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/env.py
@@ -0,0 +1,63 @@
+"""Command line parsing for the `env` command."""
+from __future__ import annotations
+
+import argparse
+
+from ...commands.env import (
+ EnvConfig,
+ command_env,
+)
+
+from ..environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_env(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `env` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'env',
+ parents=[parent],
+ help='show information about the test environment',
+ )
+
+ parser.set_defaults(
+ func=command_env,
+ config=EnvConfig,
+ )
+
+ env = parser.add_argument_group(title='env arguments')
+
+ env.add_argument(
+ '--show',
+ action='store_true',
+ help='show environment on stdout',
+ )
+
+ env.add_argument(
+ '--dump',
+ action='store_true',
+ help='dump environment to disk',
+ )
+
+ env.add_argument(
+ '--list-files',
+ action='store_true',
+ help='list files on stdout',
+ )
+
+ env.add_argument(
+ '--timeout',
+ type=int,
+ metavar='MINUTES',
+ help='timeout for future ansible-test commands (0 clears)',
+ )
+
+ add_environments(parser, completer, ControllerMode.NO_DELEGATION, TargetMode.NO_TARGETS) # env
diff --git a/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py b/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py
new file mode 100644
index 0000000..dfdefb1
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/integration/__init__.py
@@ -0,0 +1,162 @@
+"""Command line parsing for all integration commands."""
+from __future__ import annotations
+
+import argparse
+
+from ...completers import (
+ complete_target,
+ register_completer,
+)
+
+from ...environments import (
+ CompositeActionCompletionFinder,
+)
+
+from .network import (
+ do_network_integration,
+)
+
+from .posix import (
+ do_posix_integration,
+)
+
+from .windows import (
+ do_windows_integration,
+)
+
+
+def do_integration(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for all integration commands."""
+ parser = argparse.ArgumentParser(
+ add_help=False,
+ parents=[parent],
+ )
+
+ do_posix_integration(subparsers, parser, add_integration_common, completer)
+ do_network_integration(subparsers, parser, add_integration_common, completer)
+ do_windows_integration(subparsers, parser, add_integration_common, completer)
+
+
+def add_integration_common(
+ parser: argparse.ArgumentParser,
+):
+ """Add common integration arguments."""
+ register_completer(parser.add_argument(
+ '--start-at',
+ metavar='TARGET',
+ help='start at the specified target',
+ ), complete_target)
+
+ parser.add_argument(
+ '--start-at-task',
+ metavar='TASK',
+ help='start at the specified task',
+ )
+
+ parser.add_argument(
+ '--tags',
+ metavar='TAGS',
+ help='only run plays and tasks tagged with these values',
+ )
+
+ parser.add_argument(
+ '--skip-tags',
+ metavar='TAGS',
+ help='only run plays and tasks whose tags do not match these values',
+ )
+
+ parser.add_argument(
+ '--diff',
+ action='store_true',
+ help='show diff output',
+ )
+
+ parser.add_argument(
+ '--allow-destructive',
+ action='store_true',
+ help='allow destructive tests',
+ )
+
+ parser.add_argument(
+ '--allow-root',
+ action='store_true',
+ help='allow tests requiring root when not root',
+ )
+
+ parser.add_argument(
+ '--allow-disabled',
+ action='store_true',
+ help='allow tests which have been marked as disabled',
+ )
+
+ parser.add_argument(
+ '--allow-unstable',
+ action='store_true',
+ help='allow tests which have been marked as unstable',
+ )
+
+ parser.add_argument(
+ '--allow-unstable-changed',
+ action='store_true',
+ help='allow tests which have been marked as unstable when focused changes are detected',
+ )
+
+ parser.add_argument(
+ '--allow-unsupported',
+ action='store_true',
+ help='allow tests which have been marked as unsupported',
+ )
+
+ parser.add_argument(
+ '--retry-on-error',
+ action='store_true',
+ help='retry failed test with increased verbosity',
+ )
+
+ parser.add_argument(
+ '--continue-on-error',
+ action='store_true',
+ help='continue after failed test',
+ )
+
+ parser.add_argument(
+ '--debug-strategy',
+ action='store_true',
+ help='run test playbooks using the debug strategy',
+ )
+
+ parser.add_argument(
+ '--changed-all-target',
+ metavar='TARGET',
+ default='all',
+ help='target to run when all tests are needed',
+ )
+
+ parser.add_argument(
+ '--changed-all-mode',
+ metavar='MODE',
+ choices=('default', 'include', 'exclude'),
+ help='include/exclude behavior with --changed-all-target: %(choices)s',
+ )
+
+ parser.add_argument(
+ '--list-targets',
+ action='store_true',
+ help='list matching targets instead of running tests',
+ )
+
+ parser.add_argument(
+ '--no-temp-workdir',
+ action='store_true',
+ help='do not run tests from a temporary directory (use only for verifying broken tests)',
+ )
+
+ parser.add_argument(
+ '--no-temp-unicode',
+ action='store_true',
+ help='avoid unicode characters in temporary directory (use only for verifying broken tests)',
+ )
diff --git a/test/lib/ansible_test/_internal/cli/commands/integration/network.py b/test/lib/ansible_test/_internal/cli/commands/integration/network.py
new file mode 100644
index 0000000..a05985b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/integration/network.py
@@ -0,0 +1,86 @@
+"""Command line parsing for the `network-integration` command."""
+from __future__ import annotations
+
+import argparse
+import collections.abc as c
+import os
+import typing as t
+
+from ....commands.integration.network import (
+ command_network_integration,
+)
+
+from ....config import (
+ NetworkIntegrationConfig,
+)
+
+from ....target import (
+ walk_network_integration_targets,
+)
+
+from ....data import (
+ data_context,
+)
+
+from ...environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+from ...completers import (
+ register_completer,
+)
+
+
+def do_network_integration(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ add_integration_common: c.Callable[[argparse.ArgumentParser], None],
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `network-integration` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'network-integration',
+ parents=[parent],
+ help='network integration tests',
+ )
+
+ parser.set_defaults(
+ func=command_network_integration,
+ targets_func=walk_network_integration_targets,
+ config=NetworkIntegrationConfig,
+ )
+
+ network_integration = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='network integration test arguments'))
+
+ add_integration_common(network_integration)
+
+ register_completer(network_integration.add_argument(
+ '--testcase',
+ metavar='TESTCASE',
+ help='limit a test to a specified testcase',
+ ), complete_network_testcase)
+
+ add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.NETWORK_INTEGRATION) # network-integration
+
+
+def complete_network_testcase(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
+ """Return a list of test cases matching the given prefix if only one target was parsed from the command line, otherwise return an empty list."""
+ testcases = []
+
+ # since testcases are module specific, don't autocomplete if more than one
+ # module is specified
+ if len(parsed_args.include) != 1:
+ return []
+
+ target = parsed_args.include[0]
+ test_dir = os.path.join(data_context().content.integration_targets_path, target, 'tests')
+ connection_dirs = data_context().content.get_dirs(test_dir)
+
+ for connection_dir in connection_dirs:
+ for testcase in [os.path.basename(path) for path in data_context().content.get_files(connection_dir)]:
+ if testcase.startswith(prefix):
+ testcases.append(testcase.split('.', 1)[0])
+
+ return testcases
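+
+
+# Example (hypothetical layout): given targets/vyos_command/tests/cli/bad_operator.yaml,
+# running `ansible-test network-integration vyos_command --testcase <TAB>` would offer
+# 'bad_operator' (the file's basename with its extension stripped).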
diff --git a/test/lib/ansible_test/_internal/cli/commands/integration/posix.py b/test/lib/ansible_test/_internal/cli/commands/integration/posix.py
new file mode 100644
index 0000000..78d6165
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/integration/posix.py
@@ -0,0 +1,51 @@
+"""Command line parsing for the `integration` command."""
+from __future__ import annotations
+
+import argparse
+import collections.abc as c
+import typing as t
+
+from ....commands.integration.posix import (
+ command_posix_integration,
+)
+
+from ....config import (
+ PosixIntegrationConfig,
+)
+
+from ....target import (
+ walk_posix_integration_targets,
+)
+
+from ...environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_posix_integration(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ add_integration_common: c.Callable[[argparse.ArgumentParser], None],
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `integration` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'integration',
+ parents=[parent],
+ help='posix integration tests',
+ )
+
+ parser.set_defaults(
+ func=command_posix_integration,
+ targets_func=walk_posix_integration_targets,
+ config=PosixIntegrationConfig,
+ )
+
+ posix_integration = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='integration test arguments'))
+
+ add_integration_common(posix_integration)
+
+ add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.POSIX_INTEGRATION) # integration
diff --git a/test/lib/ansible_test/_internal/cli/commands/integration/windows.py b/test/lib/ansible_test/_internal/cli/commands/integration/windows.py
new file mode 100644
index 0000000..ab022e3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/integration/windows.py
@@ -0,0 +1,51 @@
+"""Command line parsing for the `windows-integration` command."""
+from __future__ import annotations
+
+import argparse
+import collections.abc as c
+import typing as t
+
+from ....commands.integration.windows import (
+ command_windows_integration,
+)
+
+from ....config import (
+ WindowsIntegrationConfig,
+)
+
+from ....target import (
+ walk_windows_integration_targets,
+)
+
+from ...environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_windows_integration(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ add_integration_common: c.Callable[[argparse.ArgumentParser], None],
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `windows-integration` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'windows-integration',
+ parents=[parent],
+ help='windows integration tests',
+ )
+
+ parser.set_defaults(
+ func=command_windows_integration,
+ targets_func=walk_windows_integration_targets,
+ config=WindowsIntegrationConfig,
+ )
+
+ windows_integration = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='windows integration test arguments'))
+
+ add_integration_common(windows_integration)
+
+ add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.WINDOWS_INTEGRATION) # windows-integration
diff --git a/test/lib/ansible_test/_internal/cli/commands/sanity.py b/test/lib/ansible_test/_internal/cli/commands/sanity.py
new file mode 100644
index 0000000..8b4a9ae
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/sanity.py
@@ -0,0 +1,119 @@
+"""Command line parsing for the `sanity` command."""
+from __future__ import annotations
+
+import argparse
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...commands.sanity import (
+ command_sanity,
+ sanity_get_tests,
+)
+
+from ...target import (
+ walk_sanity_targets,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ..environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_sanity(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `sanity` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'sanity',
+ parents=[parent],
+ help='sanity tests',
+ )
+
+ parser.set_defaults(
+ func=command_sanity,
+ targets_func=walk_sanity_targets,
+ config=SanityConfig,
+ )
+
+ sanity = parser.add_argument_group(title='sanity test arguments')
+
+ sanity.add_argument(
+ '--test',
+ metavar='TEST',
+ action='append',
+ choices=[test.name for test in sanity_get_tests()],
+ help='tests to run',
+ )
+
+ sanity.add_argument(
+ '--skip-test',
+ metavar='TEST',
+ action='append',
+ choices=[test.name for test in sanity_get_tests()],
+ help='tests to skip',
+ )
+
+ sanity.add_argument(
+ '--allow-disabled',
+ action='store_true',
+ help='allow tests to run which are disabled by default',
+ )
+
+ sanity.add_argument(
+ '--list-tests',
+ action='store_true',
+ help='list available tests',
+ )
+
+ sanity.add_argument(
+ '--enable-optional-errors',
+ action='store_true',
+ help='enable optional errors',
+ )
+
+ if data_context().content.is_ansible:
+ sanity.add_argument(
+ '--keep-git',
+ action='store_true',
+ help='transfer git related files to the remote host/container',
+ )
+ else:
+ sanity.set_defaults(
+ keep_git=False,
+ )
+
+ sanity.add_argument(
+ '--lint',
+ action='store_true',
+ help='write lint output to stdout, everything else stderr',
+ )
+
+ sanity.add_argument(
+ '--junit',
+ action='store_true',
+ help='write test failures to junit xml files',
+ )
+
+ sanity.add_argument(
+ '--failure-ok',
+ action='store_true',
+ help='exit successfully on failed tests after saving results',
+ )
+
+ sanity.add_argument(
+ '--prime-venvs',
+ action='store_true',
+ help='prepare virtual environments without running tests',
+ )
+
+ add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.SANITY) # sanity
diff --git a/test/lib/ansible_test/_internal/cli/commands/shell.py b/test/lib/ansible_test/_internal/cli/commands/shell.py
new file mode 100644
index 0000000..1baffc6
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/shell.py
@@ -0,0 +1,59 @@
+"""Command line parsing for the `shell` command."""
+from __future__ import annotations
+
+import argparse
+
+from ...commands.shell import (
+ command_shell,
+)
+
+from ...config import (
+ ShellConfig,
+)
+
+from ..environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_shell(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `shell` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'shell',
+ parents=[parent],
+ help='open an interactive shell',
+ )
+
+ parser.set_defaults(
+ func=command_shell,
+ config=ShellConfig,
+ )
+
+ shell = parser.add_argument_group(title='shell arguments')
+
+ shell.add_argument(
+ 'cmd',
+ nargs='*',
+ help='run the specified command',
+ )
+
+ shell.add_argument(
+ '--raw',
+ action='store_true',
+ help='direct to shell with no setup',
+ )
+
+ shell.add_argument(
+ '--export',
+ metavar='PATH',
+ help='export inventory instead of opening a shell',
+ )
+
+ add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.SHELL) # shell
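Annotation: the positional `cmd` above uses nargs='*', so any trailing words are collected as the command to run and an empty list means an interactive shell is wanted. A quick standalone illustration of the resulting namespaces (not ansible-test itself):

    import argparse

    parser = argparse.ArgumentParser(prog='shell-demo')
    parser.add_argument('cmd', nargs='*', help='run the specified command')
    parser.add_argument('--raw', action='store_true')

    print(parser.parse_args(['echo', 'hello']))  # Namespace(cmd=['echo', 'hello'], raw=False)
    print(parser.parse_args(['--raw']))          # Namespace(cmd=[], raw=True)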
diff --git a/test/lib/ansible_test/_internal/cli/commands/units.py b/test/lib/ansible_test/_internal/cli/commands/units.py
new file mode 100644
index 0000000..c541a87
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/commands/units.py
@@ -0,0 +1,65 @@
+"""Command line parsing for the `units` command."""
+from __future__ import annotations
+
+import argparse
+
+from ...config import (
+ UnitsConfig,
+)
+
+from ...commands.units import (
+ command_units,
+)
+
+from ...target import (
+ walk_units_targets,
+)
+
+from ..environments import (
+ CompositeActionCompletionFinder,
+ ControllerMode,
+ TargetMode,
+ add_environments,
+)
+
+
+def do_units(
+ subparsers,
+ parent: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+):
+ """Command line parsing for the `units` command."""
+ parser: argparse.ArgumentParser = subparsers.add_parser(
+ 'units',
+ parents=[parent],
+ help='unit tests',
+ )
+
+ parser.set_defaults(
+ func=command_units,
+ targets_func=walk_units_targets,
+ config=UnitsConfig,
+ )
+
+ units = parser.add_argument_group(title='unit test arguments')
+
+ units.add_argument(
+ '--collect-only',
+ action='store_true',
+ help='collect tests but do not execute them',
+ )
+
+ units.add_argument(
+ '--num-workers',
+ metavar='INT',
+ type=int,
+ help='number of workers to use (default: auto)',
+ )
+
+ units.add_argument(
+ '--requirements-mode',
+ choices=('only', 'skip'),
+ help=argparse.SUPPRESS,
+ )
+
+ add_environments(parser, completer, ControllerMode.DELEGATED, TargetMode.UNITS) # units
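Annotation: note the help=argparse.SUPPRESS on --requirements-mode. The option still parses normally but is hidden from --help, a pattern used throughout these parsers for internal options. A standalone sketch:

    import argparse

    parser = argparse.ArgumentParser(prog='units-demo')
    parser.add_argument('--num-workers', metavar='INT', type=int)
    parser.add_argument('--requirements-mode', choices=('only', 'skip'), help=argparse.SUPPRESS)

    args = parser.parse_args(['--requirements-mode', 'only', '--num-workers', '4'])
    print(args.requirements_mode, args.num_workers)  # only 4

    parser.print_help()  # lists --num-workers, but not --requirements-mode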
diff --git a/test/lib/ansible_test/_internal/cli/compat.py b/test/lib/ansible_test/_internal/cli/compat.py
new file mode 100644
index 0000000..93006d5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/compat.py
@@ -0,0 +1,505 @@
+"""Provides compatibility with first-generation host delegation options in ansible-test."""
+from __future__ import annotations
+
+import argparse
+import collections.abc as c
+import dataclasses
+import enum
+import os
+import types
+import typing as t
+
+from ..constants import (
+ CONTROLLER_PYTHON_VERSIONS,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ filter_args,
+ sorted_versions,
+ str_to_version,
+)
+
+from ..docker_util import (
+ docker_available,
+)
+
+from ..completion import (
+ docker_completion,
+ remote_completion,
+ filter_completion,
+)
+
+from ..host_configs import (
+ ControllerConfig,
+ ControllerHostConfig,
+ DockerConfig,
+ FallbackDetail,
+ FallbackReason,
+ HostConfig,
+ HostContext,
+ HostSettings,
+ NativePythonConfig,
+ NetworkInventoryConfig,
+ NetworkRemoteConfig,
+ OriginConfig,
+ PosixRemoteConfig,
+ VirtualPythonConfig,
+ WindowsInventoryConfig,
+ WindowsRemoteConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+def filter_python(version: t.Optional[str], versions: t.Optional[c.Sequence[str]]) -> t.Optional[str]:
+ """If a Python version is given and is in the given version list, return that Python version, otherwise return None."""
+ return version if version in versions else None
+
+
+def controller_python(version: t.Optional[str]) -> t.Optional[str]:
+ """If a Python version is given and is supported by the controller, return that Python version, otherwise return None."""
+ return filter_python(version, CONTROLLER_PYTHON_VERSIONS)
+
+
+def get_fallback_remote_controller() -> str:
+ """Return the remote fallback platform for the controller."""
+ platform = 'freebsd' # lower cost than RHEL and macOS
+ candidates = [item for item in filter_completion(remote_completion()).values() if item.controller_supported and item.platform == platform]
+ fallback = sorted(candidates, key=lambda value: str_to_version(value.version), reverse=True)[0]
+ return fallback.name
+
+
+def get_option_name(name: str) -> str:
+ """Return a command-line option name from the given option name."""
+ if name == 'targets':
+ name = 'target'
+
+ return f'--{name.replace("_", "-")}'
+
+
+class PythonVersionUnsupportedError(ApplicationError):
+ """A Python version was requested for a context which does not support that version."""
+ def __init__(self, context: str, version: str, versions: c.Iterable[str]) -> None:
+ super().__init__(f'Python {version} is not supported by environment `{context}`. Supported Python version(s) are: {", ".join(versions)}')
+
+
+class PythonVersionUnspecifiedError(ApplicationError):
+ """A Python version was not specified for a context which is unknown, thus the Python version is unknown."""
+ def __init__(self, context: str) -> None:
+ super().__init__(f'A Python version was not specified for environment `{context}`. Use the `--python` option to specify a Python version.')
+
+
+class ControllerNotSupportedError(ApplicationError):
+ """Option(s) were specified which do not provide support for the controller and would be ignored because they are irrelevant for the target."""
+ def __init__(self, context: str) -> None:
+ super().__init__(f'Environment `{context}` does not provide a Python version supported by the controller.')
+
+
+class OptionsConflictError(ApplicationError):
+ """Option(s) were specified which conflict with other options."""
+ def __init__(self, first: c.Iterable[str], second: c.Iterable[str]) -> None:
+ super().__init__(f'Options `{" ".join(first)}` cannot be combined with options `{" ".join(second)}`.')
+
+
+@dataclasses.dataclass(frozen=True)
+class LegacyHostOptions:
+ """Legacy host options used prior to the availability of separate controller and target host configuration."""
+ python: t.Optional[str] = None
+ python_interpreter: t.Optional[str] = None
+ local: t.Optional[bool] = None
+ venv: t.Optional[bool] = None
+ venv_system_site_packages: t.Optional[bool] = None
+ remote: t.Optional[str] = None
+ remote_provider: t.Optional[str] = None
+ remote_arch: t.Optional[str] = None
+ docker: t.Optional[str] = None
+ docker_privileged: t.Optional[bool] = None
+ docker_seccomp: t.Optional[str] = None
+ docker_memory: t.Optional[int] = None
+ windows: t.Optional[list[str]] = None
+ platform: t.Optional[list[str]] = None
+ platform_collection: t.Optional[list[tuple[str, str]]] = None
+ platform_connection: t.Optional[list[tuple[str, str]]] = None
+ inventory: t.Optional[str] = None
+
+ @staticmethod
+ def create(namespace: t.Union[argparse.Namespace, types.SimpleNamespace]) -> LegacyHostOptions:
+ """Create legacy host options from the given namespace."""
+ kwargs = {field.name: getattr(namespace, field.name, None) for field in dataclasses.fields(LegacyHostOptions)}
+
+ if kwargs['python'] == 'default':
+ kwargs['python'] = None
+
+ return LegacyHostOptions(**kwargs)
+
+ @staticmethod
+ def purge_namespace(namespace: t.Union[argparse.Namespace, types.SimpleNamespace]) -> None:
+ """Purge legacy host options fields from the given namespace."""
+ for field in dataclasses.fields(LegacyHostOptions):
+ if hasattr(namespace, field.name):
+ delattr(namespace, field.name)
+
+ @staticmethod
+ def purge_args(args: list[str]) -> list[str]:
+ """Purge legacy host options from the given command line arguments."""
+ fields: tuple[dataclasses.Field, ...] = dataclasses.fields(LegacyHostOptions)
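+ # Boolean flag options consume no value on the command line; every other legacy option consumes exactly one.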
+ filters: dict[str, int] = {get_option_name(field.name): 0 if field.type is t.Optional[bool] else 1 for field in fields}
+
+ return filter_args(args, filters)
+
+ def get_options_used(self) -> tuple[str, ...]:
+ """Return a tuple of the command line options used."""
+ fields: tuple[dataclasses.Field, ...] = dataclasses.fields(self)
+ options = tuple(sorted(get_option_name(field.name) for field in fields if getattr(self, field.name)))
+ return options
+
+
+class TargetMode(enum.Enum):
+ """Type of provisioning to use for the targets."""
+ WINDOWS_INTEGRATION = enum.auto() # windows-integration
+ NETWORK_INTEGRATION = enum.auto() # network-integration
+ POSIX_INTEGRATION = enum.auto() # integration
+ SANITY = enum.auto() # sanity
+ UNITS = enum.auto() # units
+ SHELL = enum.auto() # shell
+ NO_TARGETS = enum.auto() # coverage
+
+ @property
+ def one_host(self) -> bool:
+ """Return True if only one host (the controller) should be used, otherwise return False."""
+ return self in (TargetMode.SANITY, TargetMode.UNITS, TargetMode.NO_TARGETS)
+
+ @property
+ def no_fallback(self) -> bool:
+ """Return True if no fallback is acceptable for the controller (due to options not applying to the target), otherwise return False."""
+ return self in (TargetMode.WINDOWS_INTEGRATION, TargetMode.NETWORK_INTEGRATION, TargetMode.NO_TARGETS)
+
+ @property
+ def multiple_pythons(self) -> bool:
+ """Return True if multiple Python versions are allowed, otherwise False."""
+ return self in (TargetMode.SANITY, TargetMode.UNITS)
+
+ @property
+ def has_python(self) -> bool:
+ """Return True if this mode uses Python, otherwise False."""
+ return self in (TargetMode.POSIX_INTEGRATION, TargetMode.SANITY, TargetMode.UNITS, TargetMode.SHELL)
+
+
+def convert_legacy_args(
+ argv: list[str],
+ args: t.Union[argparse.Namespace, types.SimpleNamespace],
+ mode: TargetMode,
+) -> HostSettings:
+ """Convert pre-split host arguments in the given namespace to their split counterparts."""
+ old_options = LegacyHostOptions.create(args)
+ old_options.purge_namespace(args)
+
+ new_options = [
+ '--controller',
+ '--target',
+ '--target-python',
+ '--target-posix',
+ '--target-windows',
+ '--target-network',
+ ]
+
+ used_old_options = old_options.get_options_used()
+ used_new_options = [name for name in new_options if name in argv]
+
+ if used_old_options:
+ if used_new_options:
+ raise OptionsConflictError(used_old_options, used_new_options)
+
+ controller, targets, controller_fallback = get_legacy_host_config(mode, old_options)
+
+ if controller_fallback:
+ if mode.one_host:
+ display.info(controller_fallback.message, verbosity=1)
+ else:
+ display.warning(controller_fallback.message)
+
+ used_default_pythons = mode in (TargetMode.SANITY, TargetMode.UNITS) and not native_python(old_options)
+ else:
+ controller = args.controller or OriginConfig()
+ controller_fallback = None
+
+ if mode == TargetMode.NO_TARGETS:
+ targets = []
+ used_default_pythons = False
+ elif args.targets:
+ targets = args.targets
+ used_default_pythons = False
+ else:
+ targets = default_targets(mode, controller)
+ used_default_pythons = mode in (TargetMode.SANITY, TargetMode.UNITS)
+
+ args.controller = controller
+ args.targets = targets
+
+ if used_default_pythons:
+ control_targets = t.cast(list[ControllerConfig], targets)
+ skipped_python_versions = sorted_versions(list(set(SUPPORTED_PYTHON_VERSIONS) - {target.python.version for target in control_targets}))
+ else:
+ skipped_python_versions = []
+
+ filtered_args = old_options.purge_args(argv)
+ filtered_args = filter_args(filtered_args, {name: 1 for name in new_options})
+
+ host_settings = HostSettings(
+ controller=controller,
+ targets=targets,
+ skipped_python_versions=skipped_python_versions,
+ filtered_args=filtered_args,
+ controller_fallback=controller_fallback,
+ )
+
+ return host_settings
+
+
+def controller_targets(
+ mode: TargetMode,
+ options: LegacyHostOptions,
+ controller: ControllerHostConfig,
+) -> list[HostConfig]:
+ """Return the configuration for controller targets."""
+ python = native_python(options)
+
+ targets: list[HostConfig]
+
+ if python:
+ targets = [ControllerConfig(python=python)]
+ else:
+ targets = default_targets(mode, controller)
+
+ return targets
+
+
+def native_python(options: LegacyHostOptions) -> t.Optional[NativePythonConfig]:
+ """Return a NativePythonConfig for the given version if it is not None, otherwise return None."""
+ if not options.python and not options.python_interpreter:
+ return None
+
+ return NativePythonConfig(version=options.python, path=options.python_interpreter)
+
+
+def get_legacy_host_config(
+ mode: TargetMode,
+ options: LegacyHostOptions,
+) -> tuple[ControllerHostConfig, list[HostConfig], t.Optional[FallbackDetail]]:
+ """
+ Return controller and target host configs derived from the provided legacy host options.
+ The goal is to match the original behavior by using non-split testing whenever possible.
+ When the options support the controller, use the options for the controller and use ControllerConfig for the targets.
+ When the options do not support the controller, use the options for the targets and use a default controller config influenced by the options.
+ """
+ venv_fallback = 'venv/default'
+ docker_fallback = 'default'
+ remote_fallback = get_fallback_remote_controller()
+
+ controller_fallback: t.Optional[tuple[str, str, FallbackReason]] = None
+
+ controller: t.Optional[ControllerHostConfig]
+ targets: list[HostConfig]
+
+ if options.venv:
+ if controller_python(options.python) or not options.python:
+ controller = OriginConfig(python=VirtualPythonConfig(version=options.python or 'default', system_site_packages=options.venv_system_site_packages))
+ else:
+ controller_fallback = f'origin:python={venv_fallback}', f'--venv --python {options.python}', FallbackReason.PYTHON
+ controller = OriginConfig(python=VirtualPythonConfig(version='default', system_site_packages=options.venv_system_site_packages))
+
+ if mode in (TargetMode.SANITY, TargetMode.UNITS):
+ python = native_python(options)
+
+ if python:
+ control_targets = [ControllerConfig(python=python)]
+ else:
+ control_targets = controller.get_default_targets(HostContext(controller_config=controller))
+
+ # Target sanity tests either have no Python requirements or manage their own virtual environments.
+ # Thus, there is no point in setting up virtual environments ahead of time for them.
+
+ if mode == TargetMode.UNITS:
+ targets = [ControllerConfig(python=VirtualPythonConfig(version=target.python.version, path=target.python.path,
+ system_site_packages=options.venv_system_site_packages)) for target in control_targets]
+ else:
+ targets = t.cast(list[HostConfig], control_targets)
+ else:
+ targets = [ControllerConfig(python=VirtualPythonConfig(version=options.python or 'default',
+ system_site_packages=options.venv_system_site_packages))]
+ elif options.docker:
+ docker_config = filter_completion(docker_completion()).get(options.docker)
+
+ if docker_config:
+ if options.python and options.python not in docker_config.supported_pythons:
+ raise PythonVersionUnsupportedError(f'--docker {options.docker}', options.python, docker_config.supported_pythons)
+
+ if docker_config.controller_supported:
+ if controller_python(options.python) or not options.python:
+ controller = DockerConfig(name=options.docker, python=native_python(options),
+ privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)
+ targets = controller_targets(mode, options, controller)
+ else:
+ controller_fallback = f'docker:{options.docker}', f'--docker {options.docker} --python {options.python}', FallbackReason.PYTHON
+ controller = DockerConfig(name=options.docker)
+ targets = controller_targets(mode, options, controller)
+ else:
+ controller_fallback = f'docker:{docker_fallback}', f'--docker {options.docker}', FallbackReason.ENVIRONMENT
+ controller = DockerConfig(name=docker_fallback)
+ targets = [DockerConfig(name=options.docker, python=native_python(options),
+ privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)]
+ else:
+ if not options.python:
+ raise PythonVersionUnspecifiedError(f'--docker {options.docker}')
+
+ if controller_python(options.python):
+ controller = DockerConfig(name=options.docker, python=native_python(options),
+ privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)
+ targets = controller_targets(mode, options, controller)
+ else:
+ controller_fallback = f'docker:{docker_fallback}', f'--docker {options.docker} --python {options.python}', FallbackReason.PYTHON
+ controller = DockerConfig(name=docker_fallback)
+ targets = [DockerConfig(name=options.docker, python=native_python(options),
+ privileged=options.docker_privileged, seccomp=options.docker_seccomp, memory=options.docker_memory)]
+ elif options.remote:
+ remote_config = filter_completion(remote_completion()).get(options.remote)
+ context, reason = None, None
+
+ if remote_config:
+ if options.python and options.python not in remote_config.supported_pythons:
+ raise PythonVersionUnsupportedError(f'--remote {options.remote}', options.python, remote_config.supported_pythons)
+
+ if remote_config.controller_supported:
+ if controller_python(options.python) or not options.python:
+ controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider,
+ arch=options.remote_arch)
+ targets = controller_targets(mode, options, controller)
+ else:
+ controller_fallback = f'remote:{options.remote}', f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON
+ controller = PosixRemoteConfig(name=options.remote, provider=options.remote_provider, arch=options.remote_arch)
+ targets = controller_targets(mode, options, controller)
+ else:
+ context, reason = f'--remote {options.remote}', FallbackReason.ENVIRONMENT
+ controller = None
+ targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch)]
+ elif mode == TargetMode.SHELL and options.remote.startswith('windows/'):
+ if options.python and options.python not in CONTROLLER_PYTHON_VERSIONS:
+ raise ControllerNotSupportedError(f'--python {options.python}')
+
+ controller = OriginConfig(python=native_python(options))
+ targets = [WindowsRemoteConfig(name=options.remote, provider=options.remote_provider, arch=options.remote_arch)]
+ else:
+ if not options.python:
+ raise PythonVersionUnspecifiedError(f'--remote {options.remote}')
+
+ if controller_python(options.python):
+ controller = PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch)
+ targets = controller_targets(mode, options, controller)
+ else:
+ context, reason = f'--remote {options.remote} --python {options.python}', FallbackReason.PYTHON
+ controller = None
+ targets = [PosixRemoteConfig(name=options.remote, python=native_python(options), provider=options.remote_provider, arch=options.remote_arch)]
+
+ if not controller:
+ if docker_available():
+ controller_fallback = f'docker:{docker_fallback}', context, reason
+ controller = DockerConfig(name=docker_fallback)
+ else:
+ controller_fallback = f'remote:{remote_fallback}', context, reason
+ controller = PosixRemoteConfig(name=remote_fallback)
+ else: # local/unspecified
+ # There are several changes in behavior from the legacy implementation when using no delegation (or the `--local` option).
+ # These changes are due to ansible-test now maintaining consistency between its own Python and that of controller Python subprocesses.
+ #
+ # 1) The `--python-interpreter` option (if different from sys.executable) now affects controller subprocesses and triggers re-execution of ansible-test.
+ # Previously this option was completely ignored except when used with the `--docker` or `--remote` options.
+ # 2) The `--python` option now triggers re-execution of ansible-test if it differs from sys.version_info.
+ # Previously it affected Python subprocesses, but not ansible-test itself.
+
+ if controller_python(options.python) or not options.python:
+ controller = OriginConfig(python=native_python(options))
+ targets = controller_targets(mode, options, controller)
+ else:
+ controller_fallback = 'origin:python=default', f'--python {options.python}', FallbackReason.PYTHON
+ controller = OriginConfig()
+ targets = controller_targets(mode, options, controller)
+
+ if controller_fallback:
+ controller_option, context, reason = controller_fallback
+
+ if mode.no_fallback:
+ raise ControllerNotSupportedError(context)
+
+ fallback_detail = FallbackDetail(
+ reason=reason,
+ message=f'Using `--controller {controller_option}` since `{context}` does not support the controller.',
+ )
+ else:
+ fallback_detail = None
+
+ if mode.one_host and any(not isinstance(target, ControllerConfig) for target in targets):
+ raise ControllerNotSupportedError(controller_fallback[1])
+
+ if mode == TargetMode.NO_TARGETS:
+ targets = []
+ else:
+ targets = handle_non_posix_targets(mode, options, targets)
+
+ return controller, targets, fallback_detail
+
+
+def handle_non_posix_targets(
+ mode: TargetMode,
+ options: LegacyHostOptions,
+ targets: list[HostConfig],
+) -> list[HostConfig]:
+ """Return a list of non-POSIX targets if the target mode is non-POSIX."""
+ if mode == TargetMode.WINDOWS_INTEGRATION:
+ if options.windows:
+ targets = [WindowsRemoteConfig(name=f'windows/{version}', provider=options.remote_provider, arch=options.remote_arch)
+ for version in options.windows]
+ else:
+ targets = [WindowsInventoryConfig(path=options.inventory)]
+ elif mode == TargetMode.NETWORK_INTEGRATION:
+ if options.platform:
+ network_targets = [NetworkRemoteConfig(name=platform, provider=options.remote_provider, arch=options.remote_arch) for platform in options.platform]
+
+ for platform, collection in options.platform_collection or []:
+ for entry in network_targets:
+ if entry.platform == platform:
+ entry.collection = collection
+
+ for platform, connection in options.platform_connection or []:
+ for entry in network_targets:
+ if entry.platform == platform:
+ entry.connection = connection
+
+ targets = t.cast(list[HostConfig], network_targets)
+ else:
+ targets = [NetworkInventoryConfig(path=options.inventory)]
+
+ return targets
+
+
+def default_targets(
+ mode: TargetMode,
+ controller: ControllerHostConfig,
+) -> list[HostConfig]:
+ """Return a list of default targets for the given target mode."""
+ targets: list[HostConfig]
+
+ if mode == TargetMode.WINDOWS_INTEGRATION:
+ targets = [WindowsInventoryConfig(path=os.path.abspath(os.path.join(data_context().content.integration_path, 'inventory.winrm')))]
+ elif mode == TargetMode.NETWORK_INTEGRATION:
+ targets = [NetworkInventoryConfig(path=os.path.abspath(os.path.join(data_context().content.integration_path, 'inventory.networking')))]
+ elif mode.multiple_pythons:
+ targets = t.cast(list[HostConfig], controller.get_default_targets(HostContext(controller_config=controller)))
+ else:
+ targets = [ControllerConfig()]
+
+ return targets
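Annotation: the dataclass-driven approach in compat.py is worth a closer look. Field names double as the source of truth for option names, namespace extraction, and argv filtering. A trimmed-down, self-contained sketch of that idea (MiniLegacyOptions is a hypothetical subset, not the real class):

    import argparse
    import dataclasses
    import typing as t

    def get_option_name(name: str) -> str:
        """Derive the command-line option name from a dataclass field name."""
        return f'--{name.replace("_", "-")}'

    @dataclasses.dataclass(frozen=True)
    class MiniLegacyOptions:  # hypothetical subset of LegacyHostOptions
        python: t.Optional[str] = None
        docker_privileged: t.Optional[bool] = None

        @staticmethod
        def create(namespace: argparse.Namespace) -> 'MiniLegacyOptions':
            fields = dataclasses.fields(MiniLegacyOptions)
            return MiniLegacyOptions(**{f.name: getattr(namespace, f.name, None) for f in fields})

        def get_options_used(self) -> tuple[str, ...]:
            fields = dataclasses.fields(self)
            return tuple(sorted(get_option_name(f.name) for f in fields if getattr(self, f.name)))

    options = MiniLegacyOptions.create(argparse.Namespace(python='3.10', docker_privileged=True))
    print(options.get_options_used())  # ('--docker-privileged', '--python')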
diff --git a/test/lib/ansible_test/_internal/cli/completers.py b/test/lib/ansible_test/_internal/cli/completers.py
new file mode 100644
index 0000000..903b69b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/completers.py
@@ -0,0 +1,30 @@
+"""Completers for use with argcomplete."""
+from __future__ import annotations
+
+import argparse
+
+from ..target import (
+ find_target_completion,
+)
+
+from .argparsing.argcompletion import (
+ OptionCompletionFinder,
+)
+
+
+def complete_target(completer: OptionCompletionFinder, prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
+ """Perform completion for the targets configured for the command being parsed."""
+ matches = find_target_completion(parsed_args.targets_func, prefix, completer.list_mode)
+ completer.disable_completion_mangling = completer.list_mode and len(matches) > 1
+ return matches
+
+
+def complete_choices(choices: list[str], prefix: str, **_) -> list[str]:
+ """Perform completion using the provided choices."""
+ matches = [choice for choice in choices if choice.startswith(prefix)]
+ return matches
+
+
+def register_completer(action: argparse.Action, completer) -> None:
+ """Register the given completer with the specified action."""
+ action.completer = completer # type: ignore[attr-defined] # intentionally using an attribute that does not exist
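Annotation: register_completer() works because argcomplete looks for a `completer` attribute on each argparse action; argparse itself never defines that attribute, hence the type-ignore comment above. A minimal standalone illustration:

    import argparse

    def complete_color(prefix, parsed_args, **_):
        """Return the choices starting with the given prefix."""
        return [c for c in ('red', 'green', 'blue') if c.startswith(prefix)]

    parser = argparse.ArgumentParser()
    action = parser.add_argument('--color')
    action.completer = complete_color  # the same assignment register_completer() performs

    # With argcomplete wired up (the PYTHON_ARGCOMPLETE_OK marker plus a call to
    # argcomplete.autocomplete(parser)), typing `--color b<TAB>` completes to `blue`.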
diff --git a/test/lib/ansible_test/_internal/cli/converters.py b/test/lib/ansible_test/_internal/cli/converters.py
new file mode 100644
index 0000000..71e0dae
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/converters.py
@@ -0,0 +1,19 @@
+"""Converters for use as the type argument for arparse's add_argument method."""
+from __future__ import annotations
+
+import argparse
+
+
+def key_value_type(value: str) -> tuple[str, str]:
+ """Wrapper around key_value."""
+ return key_value(value)
+
+
+def key_value(value: str) -> tuple[str, str]:
+ """Type parsing and validation for argparse key/value pairs separated by an '=' character."""
+ parts = value.split('=')
+
+ if len(parts) != 2:
+ raise argparse.ArgumentTypeError('"%s" must be in the format "key=value"' % value)
+
+ return parts[0], parts[1]
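Annotation: a usage sketch for the converter above. Passed as type=, it validates and splits each key=value pair before the value lands in the namespace (the function body is repeated here so the snippet runs standalone):

    import argparse

    def key_value_type(value: str) -> tuple[str, str]:
        parts = value.split('=')
        if len(parts) != 2:
            raise argparse.ArgumentTypeError('"%s" must be in the format "key=value"' % value)
        return parts[0], parts[1]

    parser = argparse.ArgumentParser()
    parser.add_argument('--platform-collection', type=key_value_type, action='append')

    args = parser.parse_args(['--platform-collection', 'ios=cisco.ios'])
    print(args.platform_collection)  # [('ios', 'cisco.ios')]
    # parser.parse_args(['--platform-collection', 'ios']) would exit with a usage error.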
diff --git a/test/lib/ansible_test/_internal/cli/environments.py b/test/lib/ansible_test/_internal/cli/environments.py
new file mode 100644
index 0000000..5063715
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/environments.py
@@ -0,0 +1,612 @@
+"""Command line parsing for test environments."""
+from __future__ import annotations
+
+import argparse
+import enum
+import functools
+import typing as t
+
+from ..constants import (
+ CONTROLLER_PYTHON_VERSIONS,
+ REMOTE_PROVIDERS,
+ SECCOMP_CHOICES,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from ..util import (
+ REMOTE_ARCHITECTURES,
+)
+
+from ..completion import (
+ docker_completion,
+ network_completion,
+ remote_completion,
+ windows_completion,
+ filter_completion,
+)
+
+from ..cli.argparsing import (
+ CompositeAction,
+ CompositeActionCompletionFinder,
+)
+
+from ..cli.argparsing.actions import (
+ EnumAction,
+)
+
+from ..cli.actions import (
+ DelegatedControllerAction,
+ NetworkSshTargetAction,
+ NetworkTargetAction,
+ OriginControllerAction,
+ PosixSshTargetAction,
+ PosixTargetAction,
+ SanityPythonTargetAction,
+ UnitsPythonTargetAction,
+ WindowsSshTargetAction,
+ WindowsTargetAction,
+)
+
+from ..cli.compat import (
+ TargetMode,
+)
+
+from ..config import (
+ TerminateMode,
+)
+
+from .completers import (
+ complete_choices,
+ register_completer,
+)
+
+from .converters import (
+ key_value_type,
+)
+
+from .epilog import (
+ get_epilog,
+)
+
+from ..ci import (
+ get_ci_provider,
+)
+
+
+class ControllerMode(enum.Enum):
+ """Type of provisioning to use for the controller."""
+ NO_DELEGATION = enum.auto()
+ ORIGIN = enum.auto()
+ DELEGATED = enum.auto()
+
+
+def add_environments(
+ parser: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+ controller_mode: ControllerMode,
+ target_mode: TargetMode,
+) -> None:
+ """Add arguments for the environments used to run ansible-test and commands it invokes."""
+ no_environment = controller_mode == ControllerMode.NO_DELEGATION and target_mode == TargetMode.NO_TARGETS
+
+ parser.set_defaults(no_environment=no_environment)
+
+ if no_environment:
+ return
+
+ parser.set_defaults(target_mode=target_mode)
+
+ add_global_options(parser, controller_mode)
+ add_legacy_environment_options(parser, controller_mode, target_mode)
+ action_types = add_composite_environment_options(parser, completer, controller_mode, target_mode)
+
+ sections = [f'{heading}\n{content}'
+ for action_type, documentation_state in CompositeAction.documentation_state.items() if action_type in action_types
+ for heading, content in documentation_state.sections.items()]
+
+ if not get_ci_provider().supports_core_ci_auth():
+ sections.append('Remote provisioning options have been hidden since no Ansible Core CI API key was found.')
+
+ sections.append(get_epilog(completer))
+
+ parser.formatter_class = argparse.RawDescriptionHelpFormatter
+ parser.epilog = '\n\n'.join(sections)
+
+
+def add_global_options(
+ parser: argparse.ArgumentParser,
+ controller_mode: ControllerMode,
+):
+ """Add global options for controlling the test environment that work with both the legacy and composite options."""
+ global_parser = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='global environment arguments'))
+
+ global_parser.add_argument(
+ '--containers',
+ metavar='JSON',
+ help=argparse.SUPPRESS,
+ )
+
+ global_parser.add_argument(
+ '--pypi-proxy',
+ action='store_true',
+ help=argparse.SUPPRESS,
+ )
+
+ global_parser.add_argument(
+ '--pypi-endpoint',
+ metavar='URI',
+ help=argparse.SUPPRESS,
+ )
+
+ global_parser.add_argument(
+ '--requirements',
+ action='store_true',
+ default=False,
+ help='install command requirements',
+ )
+
+ global_parser.add_argument(
+ '--no-pip-check',
+ action='store_true',
+ help=argparse.SUPPRESS, # deprecated, kept for now (with a warning) for backwards compatibility
+ )
+
+ add_global_remote(global_parser, controller_mode)
+ add_global_docker(global_parser, controller_mode)
+
+
+def add_composite_environment_options(
+ parser: argparse.ArgumentParser,
+ completer: CompositeActionCompletionFinder,
+ controller_mode: ControllerMode,
+ target_mode: TargetMode,
+) -> list[t.Type[CompositeAction]]:
+ """Add composite options for controlling the test environment."""
+ composite_parser = t.cast(argparse.ArgumentParser, parser.add_argument_group(
+ title='composite environment arguments (mutually exclusive with "environment arguments" above)'))
+
+ composite_parser.add_argument(
+ '--host-path',
+ help=argparse.SUPPRESS,
+ )
+
+ action_types: list[t.Type[CompositeAction]] = []
+
+ def register_action_type(action_type: t.Type[CompositeAction]) -> t.Type[CompositeAction]:
+ """Register the provided composite action type and return it."""
+ action_types.append(action_type)
+ return action_type
+
+ if controller_mode == ControllerMode.NO_DELEGATION:
+ composite_parser.set_defaults(controller=None)
+ else:
+ register_completer(composite_parser.add_argument(
+ '--controller',
+ metavar='OPT',
+ action=register_action_type(DelegatedControllerAction if controller_mode == ControllerMode.DELEGATED else OriginControllerAction),
+ help='configuration for the controller',
+ ), completer.completer)
+
+ if target_mode == TargetMode.NO_TARGETS:
+ composite_parser.set_defaults(targets=[])
+ elif target_mode == TargetMode.SHELL:
+ group = composite_parser.add_mutually_exclusive_group()
+
+ register_completer(group.add_argument(
+ '--target-posix',
+ metavar='OPT',
+ action=register_action_type(PosixSshTargetAction),
+ help='configuration for the target',
+ ), completer.completer)
+
+ suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
+
+ register_completer(group.add_argument(
+ '--target-windows',
+ metavar='OPT',
+ action=WindowsSshTargetAction if suppress else register_action_type(WindowsSshTargetAction),
+ help=suppress or 'configuration for the target',
+ ), completer.completer)
+
+ register_completer(group.add_argument(
+ '--target-network',
+ metavar='OPT',
+ action=NetworkSshTargetAction if suppress else register_action_type(NetworkSshTargetAction),
+ help=suppress or 'configuration for the target',
+ ), completer.completer)
+ else:
+ if target_mode.multiple_pythons:
+ target_option = '--target-python'
+ target_help = 'configuration for the target python interpreter(s)'
+ elif target_mode == TargetMode.POSIX_INTEGRATION:
+ target_option = '--target'
+ target_help = 'configuration for the target'
+ else:
+ target_option = '--target'
+ target_help = 'configuration for the target(s)'
+
+ target_actions = {
+ TargetMode.POSIX_INTEGRATION: PosixTargetAction,
+ TargetMode.WINDOWS_INTEGRATION: WindowsTargetAction,
+ TargetMode.NETWORK_INTEGRATION: NetworkTargetAction,
+ TargetMode.SANITY: SanityPythonTargetAction,
+ TargetMode.UNITS: UnitsPythonTargetAction,
+ }
+
+ target_action = target_actions[target_mode]
+
+ register_completer(composite_parser.add_argument(
+ target_option,
+ metavar='OPT',
+ action=register_action_type(target_action),
+ help=target_help,
+ ), completer.completer)
+
+ return action_types
+
+
+def add_legacy_environment_options(
+ parser: argparse.ArgumentParser,
+ controller_mode: ControllerMode,
+ target_mode: TargetMode,
+):
+ """Add legacy options for controlling the test environment."""
+ environment: argparse.ArgumentParser = parser.add_argument_group( # type: ignore[assignment] # real type private
+ title='environment arguments (mutually exclusive with "composite environment arguments" below)')
+
+ add_environments_python(environment, target_mode)
+ add_environments_host(environment, controller_mode, target_mode)
+
+
+def add_environments_python(
+ environments_parser: argparse.ArgumentParser,
+ target_mode: TargetMode,
+) -> None:
+ """Add environment arguments to control the Python version(s) used."""
+ python_versions: tuple[str, ...]
+
+ if target_mode.has_python:
+ python_versions = SUPPORTED_PYTHON_VERSIONS
+ else:
+ python_versions = CONTROLLER_PYTHON_VERSIONS
+
+ environments_parser.add_argument(
+ '--python',
+ metavar='X.Y',
+ choices=python_versions + ('default',),
+ help='python version: %s' % ', '.join(python_versions),
+ )
+
+ environments_parser.add_argument(
+ '--python-interpreter',
+ metavar='PATH',
+ help='path to the python interpreter',
+ )
+
+
+def add_environments_host(
+ environments_parser: argparse.ArgumentParser,
+ controller_mode: ControllerMode,
+ target_mode: TargetMode,
+) -> None:
+ """Add environment arguments for the given host and argument modes."""
+ environments_exclusive_group: argparse.ArgumentParser = environments_parser.add_mutually_exclusive_group() # type: ignore[assignment] # real type private
+
+ add_environment_local(environments_exclusive_group)
+ add_environment_venv(environments_exclusive_group, environments_parser)
+
+ if controller_mode == ControllerMode.DELEGATED:
+ add_environment_remote(environments_exclusive_group, environments_parser, target_mode)
+ add_environment_docker(environments_exclusive_group, environments_parser, target_mode)
+
+ if target_mode == TargetMode.WINDOWS_INTEGRATION:
+ add_environment_windows(environments_parser)
+
+ if target_mode == TargetMode.NETWORK_INTEGRATION:
+ add_environment_network(environments_parser)
+
+
+def add_environment_network(
+ environments_parser: argparse.ArgumentParser,
+) -> None:
+ """Add environment arguments for running on a windows host."""
+ register_completer(environments_parser.add_argument(
+ '--platform',
+ metavar='PLATFORM',
+ action='append',
+ help='network platform/version',
+ ), complete_network_platform)
+
+ register_completer(environments_parser.add_argument(
+ '--platform-collection',
+ type=key_value_type,
+ metavar='PLATFORM=COLLECTION',
+ action='append',
+ help='collection used to test platform',
+ ), complete_network_platform_collection)
+
+ register_completer(environments_parser.add_argument(
+ '--platform-connection',
+ type=key_value_type,
+ metavar='PLATFORM=CONNECTION',
+ action='append',
+ help='connection used to test platform',
+ ), complete_network_platform_connection)
+
+ environments_parser.add_argument(
+ '--inventory',
+ metavar='PATH',
+ help='path to inventory used for tests',
+ )
+
+
+def add_environment_windows(
+ environments_parser: argparse.ArgumentParser,
+) -> None:
+ """Add environment arguments for running on a windows host."""
+ register_completer(environments_parser.add_argument(
+ '--windows',
+ metavar='VERSION',
+ action='append',
+ help='windows version',
+ ), complete_windows)
+
+ environments_parser.add_argument(
+ '--inventory',
+ metavar='PATH',
+ help='path to inventory used for tests',
+ )
+
+
+def add_environment_local(
+ exclusive_parser: argparse.ArgumentParser,
+) -> None:
+ """Add environment arguments for running on the local (origin) host."""
+ exclusive_parser.add_argument(
+ '--local',
+ action='store_true',
+ help='run from the local environment',
+ )
+
+
+def add_environment_venv(
+ exclusive_parser: argparse.ArgumentParser,
+ environments_parser: argparse.ArgumentParser,
+) -> None:
+ """Add environment arguments for running in ansible-test managed virtual environments."""
+ exclusive_parser.add_argument(
+ '--venv',
+ action='store_true',
+ help='run from a virtual environment',
+ )
+
+ environments_parser.add_argument(
+ '--venv-system-site-packages',
+ action='store_true',
+ help='enable system site packages')
+
+
+def add_global_docker(
+ parser: argparse.ArgumentParser,
+ controller_mode: ControllerMode,
+) -> None:
+ """Add global options for Docker."""
+ if controller_mode != ControllerMode.DELEGATED:
+ parser.set_defaults(
+ docker_no_pull=False,
+ docker_network=None,
+ docker_terminate=None,
+ prime_containers=False,
+ dev_systemd_debug=False,
+ dev_probe_cgroups=None,
+ )
+
+ return
+
+ parser.add_argument(
+ '--docker-no-pull',
+ action='store_true',
+ help=argparse.SUPPRESS, # deprecated, kept for now (with a warning) for backwards compatibility
+ )
+
+ parser.add_argument(
+ '--docker-network',
+ metavar='NET',
+ help='run using the specified network',
+ )
+
+ parser.add_argument(
+ '--docker-terminate',
+ metavar='T',
+ default=TerminateMode.ALWAYS,
+ type=TerminateMode,
+ action=EnumAction,
+ help='terminate the container: %(choices)s (default: %(default)s)',
+ )
+
+ parser.add_argument(
+ '--prime-containers',
+ action='store_true',
+ help='download containers without running tests',
+ )
+
+ # Docker support isn't related to ansible-core-ci.
+ # However, ansible-core-ci support is a reasonable indicator that the user may need the `--dev-*` options.
+ suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
+
+ parser.add_argument(
+ '--dev-systemd-debug',
+ action='store_true',
+ help=suppress or 'enable systemd debugging in containers',
+ )
+
+ parser.add_argument(
+ '--dev-probe-cgroups',
+ metavar='DIR',
+ nargs='?',
+ const='',
+ help=suppress or 'probe container cgroups, with optional log dir',
+ )
+
+
+def add_environment_docker(
+ exclusive_parser: argparse.ArgumentParser,
+ environments_parser: argparse.ArgumentParser,
+ target_mode: TargetMode,
+) -> None:
+ """Add environment arguments for running in docker containers."""
+ if target_mode in (TargetMode.POSIX_INTEGRATION, TargetMode.SHELL):
+ docker_images = sorted(filter_completion(docker_completion()))
+ else:
+ docker_images = sorted(filter_completion(docker_completion(), controller_only=True))
+
+ register_completer(exclusive_parser.add_argument(
+ '--docker',
+ metavar='IMAGE',
+ nargs='?',
+ const='default',
+ help='run from a docker container',
+ ), functools.partial(complete_choices, docker_images))
+
+ environments_parser.add_argument(
+ '--docker-privileged',
+ action='store_true',
+ help='run docker container in privileged mode',
+ )
+
+ environments_parser.add_argument(
+ '--docker-seccomp',
+ metavar='SC',
+ choices=SECCOMP_CHOICES,
+ help='set seccomp confinement for the test container: %(choices)s',
+ )
+
+ environments_parser.add_argument(
+ '--docker-memory',
+ metavar='INT',
+ type=int,
+ help='memory limit for docker in bytes',
+ )
+
+
+def add_global_remote(
+ parser: argparse.ArgumentParser,
+ controller_mode: ControllerMode,
+) -> None:
+ """Add global options for remote instances."""
+ if controller_mode != ControllerMode.DELEGATED:
+ parser.set_defaults(
+ remote_stage=None,
+ remote_endpoint=None,
+ remote_terminate=None,
+ )
+
+ return
+
+ suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
+
+ register_completer(parser.add_argument(
+ '--remote-stage',
+ metavar='STAGE',
+ default='prod',
+ help=suppress or 'remote stage to use: prod, dev',
+ ), complete_remote_stage)
+
+ parser.add_argument(
+ '--remote-endpoint',
+ metavar='EP',
+ help=suppress or 'remote provisioning endpoint to use',
+ )
+
+ parser.add_argument(
+ '--remote-terminate',
+ metavar='T',
+ default=TerminateMode.NEVER,
+ type=TerminateMode,
+ action=EnumAction,
+ help=suppress or 'terminate the remote instance: %(choices)s (default: %(default)s)',
+ )
+
+
+def add_environment_remote(
+ exclusive_parser: argparse.ArgumentParser,
+ environments_parser: argparse.ArgumentParser,
+ target_mode: TargetMode,
+) -> None:
+ """Add environment arguments for running in ansible-core-ci provisioned remote virtual machines."""
+ if target_mode == TargetMode.POSIX_INTEGRATION:
+ remote_platforms = get_remote_platform_choices()
+ elif target_mode == TargetMode.SHELL:
+ remote_platforms = sorted(set(get_remote_platform_choices()) | set(get_windows_platform_choices()))
+ else:
+ remote_platforms = get_remote_platform_choices(True)
+
+ suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
+
+ register_completer(exclusive_parser.add_argument(
+ '--remote',
+ metavar='NAME',
+ help=suppress or 'run from a remote instance',
+ ), functools.partial(complete_choices, remote_platforms))
+
+ environments_parser.add_argument(
+ '--remote-provider',
+ metavar='PR',
+ choices=REMOTE_PROVIDERS,
+ help=suppress or 'remote provider to use: %(choices)s',
+ )
+
+ environments_parser.add_argument(
+ '--remote-arch',
+ metavar='ARCH',
+ choices=REMOTE_ARCHITECTURES,
+ help=suppress or 'remote arch to use: %(choices)s',
+ )
+
+
+def complete_remote_stage(prefix: str, **_) -> list[str]:
+ """Return a list of supported stages matching the given prefix."""
+ return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)]
+
+
+def complete_windows(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
+ """Return a list of supported Windows versions matching the given prefix, excluding versions already parsed from the command line."""
+ return [i for i in get_windows_version_choices() if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
+
+
+def complete_network_platform(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
+ """Return a list of supported network platforms matching the given prefix, excluding platforms already parsed from the command line."""
+ images = sorted(filter_completion(network_completion()))
+
+ return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
+
+
+def complete_network_platform_collection(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
+ """Return a list of supported network platforms matching the given prefix, excluding collection platforms already parsed from the command line."""
+ left = prefix.split('=')[0]
+ images = sorted(set(image.platform for image in filter_completion(network_completion()).values()))
+
+ return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])]
+
+
+def complete_network_platform_connection(prefix: str, parsed_args: argparse.Namespace, **_) -> list[str]:
+ """Return a list of supported network platforms matching the given prefix, excluding connection platforms already parsed from the command line."""
+ left = prefix.split('=')[0]
+ images = sorted(set(image.platform for image in filter_completion(network_completion()).values()))
+
+ return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_connection or i not in [x[0] for x in parsed_args.platform_connection])]
+
+
+def get_remote_platform_choices(controller: bool = False) -> list[str]:
+ """Return a list of supported remote platforms matching the given prefix."""
+ return sorted(filter_completion(remote_completion(), controller_only=controller))
+
+
+def get_windows_platform_choices() -> list[str]:
+ """Return a list of supported Windows versions matching the given prefix."""
+ return sorted(f'windows/{windows.version}' for windows in filter_completion(windows_completion()).values())
+
+
+def get_windows_version_choices() -> list[str]:
+ """Return a list of supported Windows versions."""
+ return sorted(windows.version for windows in filter_completion(windows_completion()).values())
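Annotation: a pattern that recurs throughout this module: `suppress` is either None or argparse.SUPPRESS, and `help=suppress or 'text'` then shows the real help only when the capability (here, Ansible Core CI auth) is available. A standalone sketch with a hypothetical capability check:

    import argparse

    def supports_core_ci_auth() -> bool:  # hypothetical stand-in for get_ci_provider().supports_core_ci_auth()
        return False

    suppress = None if supports_core_ci_auth() else argparse.SUPPRESS

    parser = argparse.ArgumentParser(prog='env-demo')
    parser.add_argument('--remote-endpoint', metavar='EP', help=suppress or 'remote provisioning endpoint to use')

    parser.print_help()  # --remote-endpoint is hidden here, yet it still parses when supplied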
diff --git a/test/lib/ansible_test/_internal/cli/epilog.py b/test/lib/ansible_test/_internal/cli/epilog.py
new file mode 100644
index 0000000..3800ff1
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/epilog.py
@@ -0,0 +1,23 @@
+"""Argument parsing epilog generation."""
+from __future__ import annotations
+
+from .argparsing import (
+ CompositeActionCompletionFinder,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+def get_epilog(completer: CompositeActionCompletionFinder) -> str:
+ """Generate and return the epilog to use for help output."""
+ if completer.enabled:
+ epilog = 'Tab completion available using the "argcomplete" python package.'
+ else:
+ epilog = 'Install the "argcomplete" python package to enable tab completion.'
+
+ if data_context().content.unsupported:
+ epilog += '\n\n' + data_context().explain_working_directory()
+
+ return epilog
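Annotation: the epilog built here is pre-formatted text, which is why add_environments() above switches the parser to RawDescriptionHelpFormatter; the default formatter would reflow the newlines away. A two-line demonstration:

    import argparse

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,  # preserve epilog newlines verbatim
        epilog='Tab completion available using the "argcomplete" python package.\n\nSecond pre-formatted line.',
    )
    parser.print_help()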
diff --git a/test/lib/ansible_test/_internal/cli/parsers/__init__.py b/test/lib/ansible_test/_internal/cli/parsers/__init__.py
new file mode 100644
index 0000000..1aedf63
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/parsers/__init__.py
@@ -0,0 +1,303 @@
+"""Composite argument parsers for ansible-test specific command-line arguments."""
+from __future__ import annotations
+
+import typing as t
+
+from ...constants import (
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from ...ci import (
+ get_ci_provider,
+)
+
+from ...host_configs import (
+ ControllerConfig,
+ NetworkConfig,
+ NetworkInventoryConfig,
+ PosixConfig,
+ WindowsConfig,
+ WindowsInventoryConfig,
+)
+
+from ..argparsing.parsers import (
+ DocumentationState,
+ Parser,
+ ParserState,
+ TypeParser,
+)
+
+from .value_parsers import (
+ PythonParser,
+)
+
+from .host_config_parsers import (
+ ControllerParser,
+ DockerParser,
+ NetworkInventoryParser,
+ NetworkRemoteParser,
+ OriginParser,
+ PosixRemoteParser,
+ PosixSshParser,
+ WindowsInventoryParser,
+ WindowsRemoteParser,
+)
+
+
+from .base_argument_parsers import (
+ ControllerNamespaceParser,
+ TargetNamespaceParser,
+ TargetsNamespaceParser,
+)
+
+
+class OriginControllerParser(ControllerNamespaceParser, TypeParser):
+ """Composite argument parser for the controller when delegation is not supported."""
+ def get_stateless_parsers(self) -> dict[str, Parser]:
+ """Return a dictionary of type names and type parsers."""
+ return dict(
+ origin=OriginParser(),
+ )
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ section = '--controller options:'
+
+ state.sections[section] = '' # place this section before the sections created by the parsers below
+ state.sections[section] = '\n'.join([f' {name}:{parser.document(state)}' for name, parser in self.get_stateless_parsers().items()])
+
+ return None
+
+
+class DelegatedControllerParser(ControllerNamespaceParser, TypeParser):
+ """Composite argument parser for the controller when delegation is supported."""
+ def get_stateless_parsers(self) -> dict[str, Parser]:
+ """Return a dictionary of type names and type parsers."""
+ parsers: dict[str, Parser] = dict(
+ origin=OriginParser(),
+ docker=DockerParser(controller=True),
+ )
+
+ if get_ci_provider().supports_core_ci_auth():
+ parsers.update(
+ remote=PosixRemoteParser(controller=True),
+ )
+
+ return parsers
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ section = '--controller options:'
+
+ state.sections[section] = '' # place this section before the sections created by the parsers below
+ state.sections[section] = '\n'.join([f' {name}:{parser.document(state)}' for name, parser in self.get_stateless_parsers().items()])
+
+ return None
+
+
+class PosixTargetParser(TargetNamespaceParser, TypeParser):
+ """Composite argument parser for a POSIX target."""
+ def get_stateless_parsers(self) -> dict[str, Parser]:
+ """Return a dictionary of type names and type parsers."""
+ parsers: dict[str, Parser] = dict(
+ controller=ControllerParser(),
+ docker=DockerParser(controller=False),
+ )
+
+ if get_ci_provider().supports_core_ci_auth():
+ parsers.update(
+ remote=PosixRemoteParser(controller=False),
+ )
+
+ parsers.update(
+ ssh=PosixSshParser(),
+ )
+
+ return parsers
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ section = f'{self.option_name} options (choose one):'
+
+ state.sections[section] = '' # place this section before the sections created by the parsers below
+ state.sections[section] = '\n'.join([f' {name}:{parser.document(state)}' for name, parser in self.get_stateless_parsers().items()])
+
+ return None
+
+
+class WindowsTargetParser(TargetsNamespaceParser, TypeParser):
+ """Composite argument parser for a Windows target."""
+ @property
+ def allow_inventory(self) -> bool:
+ """True if inventory is allowed, otherwise False."""
+ return True
+
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of type names and type parsers."""
+ return self.get_internal_parsers(state.root_namespace.targets)
+
+ def get_stateless_parsers(self) -> dict[str, Parser]:
+ """Return a dictionary of type names and type parsers."""
+ return self.get_internal_parsers([])
+
+ def get_internal_parsers(self, targets: list[WindowsConfig]) -> dict[str, Parser]:
+ """Return a dictionary of type names and type parsers."""
+ parsers: dict[str, Parser] = {}
+
+ if self.allow_inventory and not targets:
+ parsers.update(
+ inventory=WindowsInventoryParser(),
+ )
+
+ if not targets or not any(isinstance(target, WindowsInventoryConfig) for target in targets):
+ if get_ci_provider().supports_core_ci_auth():
+ parsers.update(
+ remote=WindowsRemoteParser(),
+ )
+
+ return parsers
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ section = f'{self.option_name} options (choose one):'
+
+ state.sections[section] = '' # place this section before the sections created by the parsers below
+ state.sections[section] = '\n'.join([f' {name}:{parser.document(state)}' for name, parser in self.get_stateless_parsers().items()])
+
+ return None
+
+
+class NetworkTargetParser(TargetsNamespaceParser, TypeParser):
+ """Composite argument parser for a network target."""
+ @property
+ def allow_inventory(self) -> bool:
+ """True if inventory is allowed, otherwise False."""
+ return True
+
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of type names and type parsers."""
+ return self.get_internal_parsers(state.root_namespace.targets)
+
+ def get_stateless_parsers(self) -> dict[str, Parser]:
+ """Return a dictionary of type names and type parsers."""
+ return self.get_internal_parsers([])
+
+ def get_internal_parsers(self, targets: list[NetworkConfig]) -> dict[str, Parser]:
+ """Return a dictionary of type names and type parsers."""
+ parsers: dict[str, Parser] = {}
+
+ if self.allow_inventory and not targets:
+ parsers.update(
+ inventory=NetworkInventoryParser(),
+ )
+
+ if not targets or not any(isinstance(target, NetworkInventoryConfig) for target in targets):
+ if get_ci_provider().supports_core_ci_auth():
+ parsers.update(
+ remote=NetworkRemoteParser(),
+ )
+
+ return parsers
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ section = f'{self.option_name} options (choose one):'
+
+ state.sections[section] = '' # place this section before the sections created by the parsers below
+ state.sections[section] = '\n'.join([f' {name}:{parser.document(state)}' for name, parser in self.get_stateless_parsers().items()])
+
+ return None
+
+
+class PythonTargetParser(TargetsNamespaceParser, Parser):
+ """Composite argument parser for a Python target."""
+ def __init__(self, allow_venv: bool) -> None:
+ super().__init__()
+
+ self.allow_venv = allow_venv
+
+ @property
+ def option_name(self) -> str:
+ """The option name used for this parser."""
+ return '--target-python'
+
+ def get_value(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result, without storing the result in the namespace."""
+ versions = list(SUPPORTED_PYTHON_VERSIONS)
+
+ for target in state.root_namespace.targets or []: # type: PosixConfig
+ versions.remove(target.python.version)
+
+ parser = PythonParser(versions, allow_venv=self.allow_venv, allow_default=True)
+ python = parser.parse(state)
+
+ value = ControllerConfig(python=python)
+
+ return value
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ section = f'{self.option_name} options (choose one):'
+
+ state.sections[section] = '\n'.join([
+ f' {PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=False, allow_default=True).document(state)} # non-origin controller',
+ f' {PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=True, allow_default=True).document(state)} # origin controller',
+ ])
+
+ return None
+
+
+class SanityPythonTargetParser(PythonTargetParser):
+ """Composite argument parser for a sanity Python target."""
+ def __init__(self) -> None:
+ super().__init__(allow_venv=False)
+
+
+class UnitsPythonTargetParser(PythonTargetParser):
+ """Composite argument parser for a units Python target."""
+ def __init__(self) -> None:
+ super().__init__(allow_venv=True)
+
+
+class PosixSshTargetParser(PosixTargetParser):
+ """Composite argument parser for a POSIX SSH target."""
+ @property
+ def option_name(self) -> str:
+ """The option name used for this parser."""
+ return '--target-posix'
+
+
+class WindowsSshTargetParser(WindowsTargetParser):
+ """Composite argument parser for a Windows SSH target."""
+ @property
+ def option_name(self) -> str:
+ """The option name used for this parser."""
+ return '--target-windows'
+
+ @property
+ def allow_inventory(self) -> bool:
+ """True if inventory is allowed, otherwise False."""
+ return False
+
+ @property
+ def limit_one(self) -> bool:
+ """True if only one target is allowed, otherwise False."""
+ return True
+
+
+class NetworkSshTargetParser(NetworkTargetParser):
+ """Composite argument parser for a network SSH target."""
+ @property
+ def option_name(self) -> str:
+ """The option name used for this parser."""
+ return '--target-network'
+
+ @property
+ def allow_inventory(self) -> bool:
+ """True if inventory is allowed, otherwise False."""
+ return False
+
+ @property
+ def limit_one(self) -> bool:
+ """True if only one target is allowed, otherwise False."""
+ return True
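Annotation: each document() method above assigns state.sections[section] = '' before building the real text. That placeholder assignment is what pins the section's position: Python dicts preserve insertion order, and a later overwrite keeps the original slot even though nested parser.document(state) calls have inserted their own sections in between. In miniature:

    sections: dict[str, str] = {}

    sections['--controller options:'] = ''                 # reserve the slot first
    sections['origin options:'] = '  python=3.11'          # added later by a nested parser
    sections['--controller options:'] = '  origin:...'     # overwrite: content changes, position does not

    print(list(sections))  # ['--controller options:', 'origin options:']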
diff --git a/test/lib/ansible_test/_internal/cli/parsers/base_argument_parsers.py b/test/lib/ansible_test/_internal/cli/parsers/base_argument_parsers.py
new file mode 100644
index 0000000..aac7a69
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/parsers/base_argument_parsers.py
@@ -0,0 +1,73 @@
+"""Base classes for the primary parsers for composite command line arguments."""
+from __future__ import annotations
+
+import abc
+import typing as t
+
+from ..argparsing.parsers import (
+ CompletionError,
+ NamespaceParser,
+ ParserState,
+)
+
+
+class ControllerNamespaceParser(NamespaceParser, metaclass=abc.ABCMeta):
+ """Base class for controller namespace parsers."""
+ @property
+ def dest(self) -> str:
+ """The name of the attribute where the value should be stored."""
+ return 'controller'
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ if state.root_namespace.targets:
+ raise ControllerRequiredFirstError()
+
+ return super().parse(state)
+
+
+class TargetNamespaceParser(NamespaceParser, metaclass=abc.ABCMeta):
+ """Base class for target namespace parsers involving a single target."""
+ @property
+ def option_name(self) -> str:
+ """The option name used for this parser."""
+ return '--target'
+
+ @property
+ def dest(self) -> str:
+ """The name of the attribute where the value should be stored."""
+ return 'targets'
+
+ @property
+ def use_list(self) -> bool:
+ """True if the destination is a list, otherwise False."""
+ return True
+
+ @property
+ def limit_one(self) -> bool:
+ """True if only one target is allowed, otherwise False."""
+ return True
+
+
+class TargetsNamespaceParser(NamespaceParser, metaclass=abc.ABCMeta):
+ """Base class for controller namespace parsers involving multiple targets."""
+ @property
+ def option_name(self) -> str:
+ """The option name used for this parser."""
+ return '--target'
+
+ @property
+ def dest(self) -> str:
+ """The name of the attribute where the value should be stored."""
+ return 'targets'
+
+ @property
+ def use_list(self) -> bool:
+ """True if the destination is a list, otherwise False."""
+ return True
+
+
+class ControllerRequiredFirstError(CompletionError):
+ """Exception raised when controller and target options are specified out-of-order."""
+ def __init__(self) -> None:
+ super().__init__('The `--controller` option must be specified before `--target` option(s).')
diff --git a/test/lib/ansible_test/_internal/cli/parsers/helpers.py b/test/lib/ansible_test/_internal/cli/parsers/helpers.py
new file mode 100644
index 0000000..836a893
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/parsers/helpers.py
@@ -0,0 +1,57 @@
+"""Helper functions for composite parsers."""
+from __future__ import annotations
+
+from ...constants import (
+ CONTROLLER_PYTHON_VERSIONS,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from ...completion import (
+ docker_completion,
+ remote_completion,
+ filter_completion,
+)
+
+from ...host_configs import (
+ DockerConfig,
+ HostConfig,
+ PosixRemoteConfig,
+)
+
+
+def get_docker_pythons(name: str, controller: bool, strict: bool) -> list[str]:
+ """Return a list of docker instance Python versions supported by the specified host config."""
+ image_config = filter_completion(docker_completion()).get(name)
+ available_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS
+
+ if not image_config:
+ return [] if strict else list(available_pythons)
+
+ supported_pythons = [python for python in image_config.supported_pythons if python in available_pythons]
+
+ return supported_pythons
+
+
+def get_remote_pythons(name: str, controller: bool, strict: bool) -> list[str]:
+ """Return a list of remote instance Python versions supported by the specified host config."""
+ platform_config = filter_completion(remote_completion()).get(name)
+ available_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS
+
+ if not platform_config:
+ return [] if strict else list(available_pythons)
+
+ supported_pythons = [python for python in platform_config.supported_pythons if python in available_pythons]
+
+ return supported_pythons
+
+
+def get_controller_pythons(controller_config: HostConfig, strict: bool) -> list[str]:
+ """Return a list of controller Python versions supported by the specified host config."""
+ if isinstance(controller_config, DockerConfig):
+ pythons = get_docker_pythons(controller_config.name, False, strict)
+ elif isinstance(controller_config, PosixRemoteConfig):
+ pythons = get_remote_pythons(controller_config.name, False, strict)
+ else:
+ pythons = list(SUPPORTED_PYTHON_VERSIONS)
+
+ return pythons
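
Both helpers share the same fallback rule: a known host is limited to its configured Python versions, while an unknown host yields nothing in strict mode (forcing an explicit python= option) and every supported version otherwise. A standalone sketch with toy completion data (the real values come from the completion tables):

    SUPPORTED = ('3.9', '3.10', '3.11')
    KNOWN_HOSTS = {'default': ('3.10', '3.11')}  # toy stand-in for completion data


    def get_pythons(name: str, strict: bool) -> list[str]:
        """Return the Python versions to offer for the named host."""
        config = KNOWN_HOSTS.get(name)

        if not config:
            # Unknown host: strict callers get nothing, permissive callers
            # assume every supported version might be available.
            return [] if strict else list(SUPPORTED)

        return [python for python in config if python in SUPPORTED]


    print(get_pythons('default', strict=True))   # ['3.10', '3.11']
    print(get_pythons('custom', strict=True))    # [] -> python= must be specified
    print(get_pythons('custom', strict=False))   # ['3.9', '3.10', '3.11']
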
diff --git a/test/lib/ansible_test/_internal/cli/parsers/host_config_parsers.py b/test/lib/ansible_test/_internal/cli/parsers/host_config_parsers.py
new file mode 100644
index 0000000..ee6f146
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/parsers/host_config_parsers.py
@@ -0,0 +1,310 @@
+"""Composite parsers for the various types of hosts."""
+from __future__ import annotations
+
+import typing as t
+
+from ...completion import (
+ docker_completion,
+ network_completion,
+ remote_completion,
+ windows_completion,
+ filter_completion,
+)
+
+from ...host_configs import (
+ ControllerConfig,
+ DockerConfig,
+ NetworkInventoryConfig,
+ NetworkRemoteConfig,
+ OriginConfig,
+ PosixRemoteConfig,
+ PosixSshConfig,
+ WindowsInventoryConfig,
+ WindowsRemoteConfig,
+)
+
+from ..compat import (
+ get_fallback_remote_controller,
+)
+
+from ..argparsing.parsers import (
+ ChoicesParser,
+ DocumentationState,
+ FileParser,
+ MatchConditions,
+ NamespaceWrappedParser,
+ PairParser,
+ Parser,
+ ParserError,
+ ParserState,
+)
+
+from .value_parsers import (
+ PlatformParser,
+ SshConnectionParser,
+)
+
+from .key_value_parsers import (
+ ControllerKeyValueParser,
+ DockerKeyValueParser,
+ EmptyKeyValueParser,
+ NetworkRemoteKeyValueParser,
+ OriginKeyValueParser,
+ PosixRemoteKeyValueParser,
+ PosixSshKeyValueParser,
+ WindowsRemoteKeyValueParser,
+)
+
+from .helpers import (
+ get_docker_pythons,
+ get_remote_pythons,
+)
+
+
+class OriginParser(Parser):
+ """Composite argument parser for the origin."""
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ namespace = OriginConfig()
+
+ state.set_namespace(namespace)
+
+ parser = OriginKeyValueParser()
+ parser.parse(state)
+
+ return namespace
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ return OriginKeyValueParser().document(state)
+
+
+class ControllerParser(Parser):
+ """Composite argument parser for the controller."""
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ namespace = ControllerConfig()
+
+ state.set_namespace(namespace)
+
+ parser = ControllerKeyValueParser()
+ parser.parse(state)
+
+ return namespace
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ return ControllerKeyValueParser().document(state)
+
+
+class DockerParser(PairParser):
+ """Composite argument parser for a docker host."""
+ def __init__(self, controller: bool) -> None:
+ self.controller = controller
+
+ def create_namespace(self) -> t.Any:
+ """Create and return a namespace."""
+ return DockerConfig()
+
+ def get_left_parser(self, state: ParserState) -> Parser:
+ """Return the parser for the left side."""
+ return NamespaceWrappedParser('name', ChoicesParser(list(filter_completion(docker_completion(), controller_only=self.controller)),
+ conditions=MatchConditions.CHOICE | MatchConditions.ANY))
+
+ def get_right_parser(self, choice: t.Any) -> Parser:
+ """Return the parser for the right side."""
+ return DockerKeyValueParser(choice, self.controller)
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ value: DockerConfig = super().parse(state)
+
+ if not value.python and not get_docker_pythons(value.name, self.controller, True):
+ raise ParserError(f'Python version required for docker image: {value.name}')
+
+ return value
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ default = 'default'
+ content = '\n'.join([f' {image} ({", ".join(get_docker_pythons(image, self.controller, False))})'
+ for image, item in filter_completion(docker_completion(), controller_only=self.controller).items()])
+
+ content += '\n'.join([
+ '',
+ ' {image} # python must be specified for custom images',
+ ])
+
+ state.sections[f'{"controller" if self.controller else "target"} docker images and supported python version (choose one):'] = content
+
+ return f'{{image}}[,{DockerKeyValueParser(default, self.controller).document(state)}]'
+
+
+class PosixRemoteParser(PairParser):
+ """Composite argument parser for a POSIX remote host."""
+ def __init__(self, controller: bool) -> None:
+ self.controller = controller
+
+ def create_namespace(self) -> t.Any:
+ """Create and return a namespace."""
+ return PosixRemoteConfig()
+
+ def get_left_parser(self, state: ParserState) -> Parser:
+ """Return the parser for the left side."""
+ return NamespaceWrappedParser('name', PlatformParser(list(filter_completion(remote_completion(), controller_only=self.controller))))
+
+ def get_right_parser(self, choice: t.Any) -> Parser:
+ """Return the parser for the right side."""
+ return PosixRemoteKeyValueParser(choice, self.controller)
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ value: PosixRemoteConfig = super().parse(state)
+
+ if not value.python and not get_remote_pythons(value.name, self.controller, True):
+ raise ParserError(f'Python version required for remote: {value.name}')
+
+ return value
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ default = get_fallback_remote_controller()
+ content = '\n'.join([f' {name} ({", ".join(get_remote_pythons(name, self.controller, False))})'
+ for name, item in filter_completion(remote_completion(), controller_only=self.controller).items()])
+
+ content += '\n'.join([
+ '',
+ ' {platform}/{version} # python must be specified for unknown systems',
+ ])
+
+ state.sections[f'{"controller" if self.controller else "target"} remote systems and supported python versions (choose one):'] = content
+
+ return f'{{system}}[,{PosixRemoteKeyValueParser(default, self.controller).document(state)}]'
+
+
+class WindowsRemoteParser(PairParser):
+ """Composite argument parser for a Windows remote host."""
+ def create_namespace(self) -> t.Any:
+ """Create and return a namespace."""
+ return WindowsRemoteConfig()
+
+ def get_left_parser(self, state: ParserState) -> Parser:
+ """Return the parser for the left side."""
+ names = list(filter_completion(windows_completion()))
+
+ for target in state.root_namespace.targets or []: # type: WindowsRemoteConfig
+ names.remove(target.name)
+
+ return NamespaceWrappedParser('name', PlatformParser(names))
+
+ def get_right_parser(self, choice: t.Any) -> Parser:
+ """Return the parser for the right side."""
+ return WindowsRemoteKeyValueParser()
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ content = '\n'.join([f' {name}' for name, item in filter_completion(windows_completion()).items()])
+
+ content += '\n'.join([
+ '',
+ ' windows/{version} # use an unknown windows version',
+ ])
+
+ state.sections['target remote systems (choose one):'] = content
+
+ return f'{{system}}[,{WindowsRemoteKeyValueParser().document(state)}]'
+
+
+class NetworkRemoteParser(PairParser):
+ """Composite argument parser for a network remote host."""
+ def create_namespace(self) -> t.Any:
+ """Create and return a namespace."""
+ return NetworkRemoteConfig()
+
+ def get_left_parser(self, state: ParserState) -> Parser:
+ """Return the parser for the left side."""
+ names = list(filter_completion(network_completion()))
+
+ for target in state.root_namespace.targets or []: # type: NetworkRemoteConfig
+ names.remove(target.name)
+
+ return NamespaceWrappedParser('name', PlatformParser(names))
+
+ def get_right_parser(self, choice: t.Any) -> Parser:
+ """Return the parser for the right side."""
+ return NetworkRemoteKeyValueParser()
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ content = '\n'.join([f' {name}' for name, item in filter_completion(network_completion()).items()])
+
+ content += '\n'.join([
+ '',
+ ' {platform}/{version} # use an unknown platform and version',
+ ])
+
+ state.sections['target remote systems (choose one):'] = content
+
+ return f'{{system}}[,{NetworkRemoteKeyValueParser().document(state)}]'
+
+
+class WindowsInventoryParser(PairParser):
+ """Composite argument parser for a Windows inventory."""
+ def create_namespace(self) -> t.Any:
+ """Create and return a namespace."""
+ return WindowsInventoryConfig()
+
+ def get_left_parser(self, state: ParserState) -> Parser:
+ """Return the parser for the left side."""
+ return NamespaceWrappedParser('path', FileParser())
+
+ def get_right_parser(self, choice: t.Any) -> Parser:
+ """Return the parser for the right side."""
+ return EmptyKeyValueParser()
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ return '{path} # INI format inventory file'
+
+
+class NetworkInventoryParser(PairParser):
+ """Composite argument parser for a network inventory."""
+ def create_namespace(self) -> t.Any:
+ """Create and return a namespace."""
+ return NetworkInventoryConfig()
+
+ def get_left_parser(self, state: ParserState) -> Parser:
+ """Return the parser for the left side."""
+ return NamespaceWrappedParser('path', FileParser())
+
+ def get_right_parser(self, choice: t.Any) -> Parser:
+ """Return the parser for the right side."""
+ return EmptyKeyValueParser()
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ return '{path} # INI format inventory file'
+
+
+class PosixSshParser(PairParser):
+ """Composite argument parser for a POSIX SSH host."""
+ def create_namespace(self) -> t.Any:
+ """Create and return a namespace."""
+ return PosixSshConfig()
+
+ def get_left_parser(self, state: ParserState) -> Parser:
+ """Return the parser for the left side."""
+ return SshConnectionParser()
+
+ def get_right_parser(self, choice: t.Any) -> Parser:
+ """Return the parser for the right side."""
+ return PosixSshKeyValueParser()
+
+ @property
+ def required(self) -> bool:
+ """True if the delimiter (and thus right parser) is required, otherwise False."""
+ return True
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ return f'{SshConnectionParser().document(state)}[,{PosixSshKeyValueParser().document(state)}]'
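
Each PairParser above consumes input of the form {name}[,{key}={value},...]: the left parser claims everything up to the first comma and the right parser handles the remaining key/value pairs. The real implementation is delimiter-driven via ParserState; a plain string-handling sketch of the same split:

    def parse_pair(value: str) -> tuple[str, dict[str, str]]:
        """Split '{name}[,{key}={value},...]' into a name and its options."""
        name, _, rest = value.partition(',')
        options: dict[str, str] = {}

        for item in filter(None, rest.split(',')):
            key, _, val = item.partition('=')
            options[key] = val

        return name, options


    print(parse_pair('ubuntu/22.04'))
    # ('ubuntu/22.04', {})
    print(parse_pair('default,python=3.11,seccomp=unconfined'))
    # ('default', {'python': '3.11', 'seccomp': 'unconfined'})
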
diff --git a/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py b/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py
new file mode 100644
index 0000000..049b71e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py
@@ -0,0 +1,239 @@
+"""Composite argument key-value parsers used by other parsers."""
+from __future__ import annotations
+
+import typing as t
+
+from ...constants import (
+ CONTROLLER_PYTHON_VERSIONS,
+ REMOTE_PROVIDERS,
+ SECCOMP_CHOICES,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from ...completion import (
+ AuditMode,
+ CGroupVersion,
+)
+
+from ...util import (
+ REMOTE_ARCHITECTURES,
+)
+
+from ...host_configs import (
+ OriginConfig,
+)
+
+from ...become import (
+ SUPPORTED_BECOME_METHODS,
+)
+
+from ..argparsing.parsers import (
+ AnyParser,
+ BooleanParser,
+ ChoicesParser,
+ DocumentationState,
+ EnumValueChoicesParser,
+ IntegerParser,
+ KeyValueParser,
+ Parser,
+ ParserState,
+)
+
+from .value_parsers import (
+ PythonParser,
+)
+
+from .helpers import (
+ get_controller_pythons,
+ get_remote_pythons,
+ get_docker_pythons,
+)
+
+
+class OriginKeyValueParser(KeyValueParser):
+ """Composite argument parser for origin key/value pairs."""
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of key names and value parsers."""
+ versions = CONTROLLER_PYTHON_VERSIONS
+
+ return dict(
+ python=PythonParser(versions=versions, allow_venv=True, allow_default=True),
+ )
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ python_parser = PythonParser(versions=CONTROLLER_PYTHON_VERSIONS, allow_venv=True, allow_default=True)
+
+ section_name = 'origin options'
+
+ state.sections[f'controller {section_name} (comma separated):'] = '\n'.join([
+ f' python={python_parser.document(state)}',
+ ])
+
+ return f'{{{section_name}}} # default'
+
+
+class ControllerKeyValueParser(KeyValueParser):
+ """Composite argument parser for controller key/value pairs."""
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of key names and value parsers."""
+ versions = get_controller_pythons(state.root_namespace.controller, False)
+ allow_default = bool(get_controller_pythons(state.root_namespace.controller, True))
+ allow_venv = isinstance(state.root_namespace.controller, OriginConfig) or not state.root_namespace.controller
+
+ return dict(
+ python=PythonParser(versions=versions, allow_venv=allow_venv, allow_default=allow_default),
+ )
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ section_name = 'controller options'
+
+ state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
+ f' python={PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=False, allow_default=True).document(state)} # non-origin controller',
+ f' python={PythonParser(SUPPORTED_PYTHON_VERSIONS, allow_venv=True, allow_default=True).document(state)} # origin controller',
+ ])
+
+ return f'{{{section_name}}} # default'
+
+
+class DockerKeyValueParser(KeyValueParser):
+ """Composite argument parser for docker key/value pairs."""
+ def __init__(self, image: str, controller: bool) -> None:
+ self.controller = controller
+ self.versions = get_docker_pythons(image, controller, False)
+ self.allow_default = bool(get_docker_pythons(image, controller, True))
+
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of key names and value parsers."""
+ return dict(
+ python=PythonParser(versions=self.versions, allow_venv=False, allow_default=self.allow_default),
+ seccomp=ChoicesParser(SECCOMP_CHOICES),
+ cgroup=EnumValueChoicesParser(CGroupVersion),
+ audit=EnumValueChoicesParser(AuditMode),
+ privileged=BooleanParser(),
+ memory=IntegerParser(),
+ )
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ python_parser = PythonParser(versions=[], allow_venv=False, allow_default=self.allow_default)
+
+ section_name = 'docker options'
+
+ state.sections[f'{"controller" if self.controller else "target"} {section_name} (comma separated):'] = '\n'.join([
+ f' python={python_parser.document(state)}',
+ f' seccomp={ChoicesParser(SECCOMP_CHOICES).document(state)}',
+ f' cgroup={EnumValueChoicesParser(CGroupVersion).document(state)}',
+ f' audit={EnumValueChoicesParser(AuditMode).document(state)}',
+ f' privileged={BooleanParser().document(state)}',
+ f' memory={IntegerParser().document(state)} # bytes',
+ ])
+
+ return f'{{{section_name}}}'
+
+
+class PosixRemoteKeyValueParser(KeyValueParser):
+ """Composite argument parser for POSIX remote key/value pairs."""
+ def __init__(self, name: str, controller: bool) -> None:
+ self.controller = controller
+ self.versions = get_remote_pythons(name, controller, False)
+ self.allow_default = bool(get_remote_pythons(name, controller, True))
+
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of key names and value parsers."""
+ return dict(
+ become=ChoicesParser(list(SUPPORTED_BECOME_METHODS)),
+ provider=ChoicesParser(REMOTE_PROVIDERS),
+ arch=ChoicesParser(REMOTE_ARCHITECTURES),
+ python=PythonParser(versions=self.versions, allow_venv=False, allow_default=self.allow_default),
+ )
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ python_parser = PythonParser(versions=[], allow_venv=False, allow_default=self.allow_default)
+
+ section_name = 'remote options'
+
+ state.sections[f'{"controller" if self.controller else "target"} {section_name} (comma separated):'] = '\n'.join([
+ f' become={ChoicesParser(list(SUPPORTED_BECOME_METHODS)).document(state)}',
+ f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}',
+ f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}',
+ f' python={python_parser.document(state)}',
+ ])
+
+ return f'{{{section_name}}}'
+
+
+class WindowsRemoteKeyValueParser(KeyValueParser):
+ """Composite argument parser for Windows remote key/value pairs."""
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of key names and value parsers."""
+ return dict(
+ provider=ChoicesParser(REMOTE_PROVIDERS),
+ arch=ChoicesParser(REMOTE_ARCHITECTURES),
+ )
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ section_name = 'remote options'
+
+ state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
+ f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}',
+ f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}',
+ ])
+
+ return f'{{{section_name}}}'
+
+
+class NetworkRemoteKeyValueParser(KeyValueParser):
+ """Composite argument parser for network remote key/value pairs."""
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of key names and value parsers."""
+ return dict(
+ provider=ChoicesParser(REMOTE_PROVIDERS),
+ arch=ChoicesParser(REMOTE_ARCHITECTURES),
+ collection=AnyParser(),
+ connection=AnyParser(),
+ )
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ section_name = 'remote options'
+
+ state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
+ f' provider={ChoicesParser(REMOTE_PROVIDERS).document(state)}',
+ f' arch={ChoicesParser(REMOTE_ARCHITECTURES).document(state)}',
+ ' collection={collection}',
+ ' connection={connection}',
+ ])
+
+ return f'{{{section_name}}}'
+
+
+class PosixSshKeyValueParser(KeyValueParser):
+ """Composite argument parser for POSIX SSH host key/value pairs."""
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of key names and value parsers."""
+ return dict(
+ python=PythonParser(versions=list(SUPPORTED_PYTHON_VERSIONS), allow_venv=False, allow_default=False),
+ )
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ python_parser = PythonParser(versions=SUPPORTED_PYTHON_VERSIONS, allow_venv=False, allow_default=False)
+
+ section_name = 'ssh options'
+
+ state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
+ f' python={python_parser.document(state)}',
+ ])
+
+ return f'{{{section_name}}}'
+
+
+class EmptyKeyValueParser(KeyValueParser):
+ """Composite argument parser when a key/value parser is required but there are no keys available."""
+ def get_parsers(self, state: ParserState) -> dict[str, Parser]:
+ """Return a dictionary of key names and value parsers."""
+ return {}
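
Each key/value parser above is essentially a mapping from key names to value parsers: unknown keys fail, and each value is validated by the parser registered for its key. A self-contained sketch of that dispatch with hypothetical validators (not the real parser classes):

    import typing as t

    Validator = t.Callable[[str], t.Any]


    def choices(*allowed: str) -> Validator:
        """Build a validator that accepts only the listed values."""
        def validate(value: str) -> str:
            if value not in allowed:
                raise ValueError(f'expected one of {allowed}, got {value!r}')
            return value
        return validate


    # Toy subset of the docker options; the real choices come from the constants above.
    DOCKER_KEYS: dict[str, Validator] = dict(
        seccomp=choices('default', 'unconfined'),
        privileged=lambda value: value == 'true',
        memory=int,
    )


    def parse_key_values(pairs: dict[str, str]) -> dict[str, t.Any]:
        """Validate each value with the parser registered for its key."""
        return {key: DOCKER_KEYS[key](value) for key, value in pairs.items()}  # KeyError on unknown keys


    print(parse_key_values({'seccomp': 'unconfined', 'memory': '1024'}))
    # {'seccomp': 'unconfined', 'memory': 1024}
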
diff --git a/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py b/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py
new file mode 100644
index 0000000..9453b76
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli/parsers/value_parsers.py
@@ -0,0 +1,179 @@
+"""Composite argument value parsers used by other parsers."""
+from __future__ import annotations
+
+import collections.abc as c
+import typing as t
+
+from ...host_configs import (
+ NativePythonConfig,
+ PythonConfig,
+ VirtualPythonConfig,
+)
+
+from ..argparsing.parsers import (
+ AbsolutePathParser,
+ AnyParser,
+ ChoicesParser,
+ DocumentationState,
+ IntegerParser,
+ MatchConditions,
+ Parser,
+ ParserError,
+ ParserState,
+ ParserBoundary,
+)
+
+
+class PythonParser(Parser):
+ """
+ Composite argument parser for Python versions, with support for specifying paths and using virtual environments.
+
+ Allowed formats:
+
+ {version}
+ venv/{version}
+ venv/system-site-packages/{version}
+
+ The `{version}` has two possible formats:
+
+ X.Y
+ X.Y@{path}
+
+ Where `X.Y` is the Python major and minor version number and `{path}` is an absolute path with one of the following formats:
+
+ /path/to/python
+ /path/to/python/directory/
+
+ When a trailing slash is present, it is considered a directory, and `python{version}` will be appended to it automatically.
+
+ The default path depends on the context:
+
+ - Known docker/remote environments can declare their own path.
+ - The origin host uses `sys.executable` if `{version}` matches the current version in `sys.version_info`.
+ - The origin host (as a controller or target) uses the `$PATH` environment variable to find `python{version}`.
+ - As a fallback/default, the path `/usr/bin/python{version}` is used.
+
+ NOTE: The Python path determines where to find the Python interpreter.
+ In the case of an ansible-test managed virtual environment, that Python interpreter will be used to create the virtual environment.
+ So the path given will not be the one actually used for the controller or target.
+
+ Known docker/remote environments limit the available Python versions to configured values known to be valid.
+ The origin host and unknown environments assume all relevant Python versions are available.
+ """
+ def __init__(self,
+ versions: c.Sequence[str],
+ *,
+ allow_default: bool,
+ allow_venv: bool,
+ ):
+ version_choices = list(versions)
+
+ if allow_default:
+ version_choices.append('default')
+
+ first_choices = list(version_choices)
+
+ if allow_venv:
+ first_choices.append('venv/')
+
+ venv_choices = list(version_choices) + ['system-site-packages/']
+
+ self.versions = versions
+ self.allow_default = allow_default
+ self.allow_venv = allow_venv
+ self.version_choices = version_choices
+ self.first_choices = first_choices
+ self.venv_choices = venv_choices
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ boundary: ParserBoundary
+
+ with state.delimit('@/', required=False) as boundary:
+ version = ChoicesParser(self.first_choices).parse(state)
+
+ python: PythonConfig
+
+ if version == 'venv':
+ with state.delimit('@/', required=False) as boundary:
+ version = ChoicesParser(self.venv_choices).parse(state)
+
+ if version == 'system-site-packages':
+ system_site_packages = True
+
+ with state.delimit('@', required=False) as boundary:
+ version = ChoicesParser(self.version_choices).parse(state)
+ else:
+ system_site_packages = False
+
+ python = VirtualPythonConfig(version=version, system_site_packages=system_site_packages)
+ else:
+ python = NativePythonConfig(version=version)
+
+ if boundary.match == '@':
+ # FUTURE: For OriginConfig or ControllerConfig->OriginConfig the path could be validated with an absolute path parser (file or directory).
+ python.path = AbsolutePathParser().parse(state)
+
+ return python
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+
+ docs = '[venv/[system-site-packages/]]' if self.allow_venv else ''
+
+ if self.versions:
+ docs += '|'.join(self.version_choices)
+ else:
+ docs += '{X.Y}'
+
+ docs += '[@{path|dir/}]'
+
+ return docs
+
+
+class PlatformParser(ChoicesParser):
+ """Composite argument parser for "{platform}/{version}" formatted choices."""
+ def __init__(self, choices: list[str]) -> None:
+ super().__init__(choices, conditions=MatchConditions.CHOICE | MatchConditions.ANY)
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ value = super().parse(state)
+
+ if len(value.split('/')) != 2:
+ raise ParserError(f'invalid platform format: {value}')
+
+ return value
+
+
+class SshConnectionParser(Parser):
+ """
+ Composite argument parser for connecting to a host using SSH.
+ Format: user@host[:port]
+ """
+ EXPECTED_FORMAT = '{user}@{host}[:{port}]'
+
+ def parse(self, state: ParserState) -> t.Any:
+ """Parse the input from the given state and return the result."""
+ namespace = state.current_namespace
+
+ with state.delimit('@'):
+ user = AnyParser(no_match_message=f'Expected {{user}} from: {self.EXPECTED_FORMAT}').parse(state)
+
+ setattr(namespace, 'user', user)
+
+ with state.delimit(':', required=False) as colon: # type: ParserBoundary
+ host = AnyParser(no_match_message=f'Expected {{host}} from: {self.EXPECTED_FORMAT}').parse(state)
+
+ setattr(namespace, 'host', host)
+
+ if colon.match:
+ port = IntegerParser(65535).parse(state)
+ setattr(namespace, 'port', port)
+
+ return namespace
+
+ def document(self, state: DocumentationState) -> t.Optional[str]:
+ """Generate and return documentation for this parser."""
+ return self.EXPECTED_FORMAT
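
The grammar documented in the PythonParser docstring can be decoded with plain string handling, which helps when reading the delimiter-driven parse method above. A sketch covering the documented formats (without the choice validation or default-path logic):

    def decode_python_spec(spec: str) -> dict[str, object]:
        """Decode '[venv/[system-site-packages/]]{X.Y}[@{path}]' into parts."""
        venv = spec.startswith('venv/')
        if venv:
            spec = spec[len('venv/'):]

        system_site_packages = spec.startswith('system-site-packages/')
        if system_site_packages:
            spec = spec[len('system-site-packages/'):]

        version, _, path = spec.partition('@')
        return dict(venv=venv, system_site_packages=system_site_packages, version=version, path=path or None)


    print(decode_python_spec('3.11'))
    # {'venv': False, 'system_site_packages': False, 'version': '3.11', 'path': None}
    print(decode_python_spec('venv/system-site-packages/3.12@/usr/bin/python3.12'))
    # {'venv': True, 'system_site_packages': True, 'version': '3.12', 'path': '/usr/bin/python3.12'}
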
diff --git a/test/lib/ansible_test/_internal/commands/__init__.py b/test/lib/ansible_test/_internal/commands/__init__.py
new file mode 100644
index 0000000..e9cb681
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/__init__.py
@@ -0,0 +1,2 @@
+"""Nearly empty __init__.py to keep pylint happy."""
+from __future__ import annotations
diff --git a/test/lib/ansible_test/_internal/commands/coverage/__init__.py b/test/lib/ansible_test/_internal/commands/coverage/__init__.py
new file mode 100644
index 0000000..139cf3c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/__init__.py
@@ -0,0 +1,370 @@
+"""Common logic for the coverage subcommand."""
+from __future__ import annotations
+
+import collections.abc as c
+import json
+import os
+import re
+import typing as t
+
+from ...encoding import (
+ to_bytes,
+)
+
+from ...io import (
+ read_text_file,
+ read_json_file,
+)
+
+from ...util import (
+ ApplicationError,
+ common_environment,
+ display,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from ...util_common import (
+ intercept_python,
+ ResultType,
+)
+
+from ...config import (
+ EnvironmentConfig,
+)
+
+from ...python_requirements import (
+ install_requirements,
+)
+
+from ...target import (
+ walk_module_targets,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...pypi_proxy import (
+ configure_pypi_proxy,
+)
+
+from ...provisioning import (
+ HostState,
+)
+
+from ...coverage_util import (
+ get_coverage_file_schema_version,
+ CoverageError,
+ CONTROLLER_COVERAGE_VERSION,
+)
+
+if t.TYPE_CHECKING:
+ import coverage as coverage_module
+
+COVERAGE_GROUPS = ('command', 'target', 'environment', 'version')
+COVERAGE_CONFIG_PATH = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'coveragerc')
+COVERAGE_OUTPUT_FILE_NAME = 'coverage'
+
+
+class CoverageConfig(EnvironmentConfig):
+ """Configuration for the coverage command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args, 'coverage')
+
+
+def initialize_coverage(args: CoverageConfig, host_state: HostState) -> coverage_module:
+ """Delegate execution if requested, install requirements, then import and return the coverage module. Raises an exception if coverage is not available."""
+ configure_pypi_proxy(args, host_state.controller_profile) # coverage
+ install_requirements(args, host_state.controller_profile.python, coverage=True) # coverage
+
+ try:
+ import coverage
+ except ImportError:
+ coverage = None
+
+ coverage_required_version = CONTROLLER_COVERAGE_VERSION.coverage_version
+
+ if not coverage:
+ raise ApplicationError(f'Version {coverage_required_version} of the Python "coverage" module must be installed to use this command.')
+
+ if coverage.__version__ != coverage_required_version:
+ raise ApplicationError(f'Version {coverage_required_version} of the Python "coverage" module is required. Version {coverage.__version__} was found.')
+
+ return coverage
+
+
+def run_coverage(args: CoverageConfig, host_state: HostState, output_file: str, command: str, cmd: list[str]) -> None:
+ """Run the coverage cli tool with the specified options."""
+ env = common_environment()
+ env.update(dict(COVERAGE_FILE=output_file))
+
+ cmd = ['python', '-m', 'coverage.__main__', command, '--rcfile', COVERAGE_CONFIG_PATH] + cmd
+
+ stdout, stderr = intercept_python(args, host_state.controller_profile.python, cmd, env, capture=True)
+
+ stdout = (stdout or '').strip()
+ stderr = (stderr or '').strip()
+
+ if stdout:
+ display.info(stdout)
+
+ if stderr:
+ display.warning(stderr)
+
+
+def get_all_coverage_files() -> list[str]:
+ """Return a list of all coverage file paths."""
+ return get_python_coverage_files() + get_powershell_coverage_files()
+
+
+def get_python_coverage_files(path: t.Optional[str] = None) -> list[str]:
+ """Return the list of Python coverage file paths."""
+ return get_coverage_files('python', path)
+
+
+def get_powershell_coverage_files(path: t.Optional[str] = None) -> list[str]:
+ """Return the list of PowerShell coverage file paths."""
+ return get_coverage_files('powershell', path)
+
+
+def get_coverage_files(language: str, path: t.Optional[str] = None) -> list[str]:
+ """Return the list of coverage file paths for the given language."""
+ coverage_dir = path or ResultType.COVERAGE.path
+
+ try:
+ coverage_files = [os.path.join(coverage_dir, f) for f in os.listdir(coverage_dir)
+ if '=coverage.' in f and '=%s' % language in f]
+ except FileNotFoundError:
+ return []
+
+ return coverage_files
+
+
+def get_collection_path_regexes() -> tuple[t.Optional[t.Pattern], t.Optional[t.Pattern]]:
+ """Return a pair of regexes used for identifying and manipulating collection paths."""
+ if data_context().content.collection:
+ collection_search_re = re.compile(r'/%s/' % data_context().content.collection.directory)
+ collection_sub_re = re.compile(r'^.*?/%s/' % data_context().content.collection.directory)
+ else:
+ collection_search_re = None
+ collection_sub_re = None
+
+ return collection_search_re, collection_sub_re
+
+
+def get_python_modules() -> dict[str, str]:
+ """Return a dictionary of Ansible module names and their paths."""
+ return dict((target.module, target.path) for target in list(walk_module_targets()) if target.path.endswith('.py'))
+
+
+def enumerate_python_arcs(
+ path: str,
+ coverage: coverage_module,
+ modules: dict[str, str],
+ collection_search_re: t.Optional[t.Pattern],
+ collection_sub_re: t.Optional[t.Pattern],
+) -> c.Generator[tuple[str, set[tuple[int, int]]], None, None]:
+ """Enumerate Python code coverage arcs in the given file."""
+ if os.path.getsize(path) == 0:
+ display.warning('Empty coverage file: %s' % path, verbosity=2)
+ return
+
+ try:
+ arc_data = read_python_coverage(path, coverage)
+ except CoverageError as ex:
+ display.error(str(ex))
+ return
+
+ for filename, arcs in arc_data.items():
+ if not arcs:
+ # This is most likely due to using an unsupported version of coverage.
+ display.warning('No arcs found for "%s" in coverage file: %s' % (filename, path))
+ continue
+
+ filename = sanitize_filename(filename, modules=modules, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re)
+
+ if not filename:
+ continue
+
+ yield filename, set(arcs)
+
+
+PythonArcs = dict[str, list[tuple[int, int]]]
+"""Python coverage arcs."""
+
+
+def read_python_coverage(path: str, coverage: coverage_module) -> PythonArcs:
+ """Return coverage arcs from the specified coverage file. Raises a CoverageError exception if coverage cannot be read."""
+ try:
+ return read_python_coverage_native(path, coverage)
+ except CoverageError as ex:
+ schema_version = get_coverage_file_schema_version(path)
+
+ if schema_version == CONTROLLER_COVERAGE_VERSION.schema_version:
+ raise CoverageError(path, f'Unexpected failure reading supported schema version {schema_version}.') from ex
+
+ if schema_version == 0:
+ return read_python_coverage_legacy(path)
+
+ raise CoverageError(path, f'Unsupported schema version: {schema_version}')
+
+
+def read_python_coverage_native(path: str, coverage: coverage_module) -> PythonArcs:
+ """Return coverage arcs from the specified coverage file using the coverage API."""
+ try:
+ data = coverage.CoverageData(path)
+ data.read()
+ arcs = {filename: data.arcs(filename) for filename in data.measured_files()}
+ except Exception as ex:
+ raise CoverageError(path, f'Error reading coverage file using coverage API: {ex}') from ex
+
+ return arcs
+
+
+def read_python_coverage_legacy(path: str) -> PythonArcs:
+ """Return coverage arcs from the specified coverage file, which must be in the legacy JSON format."""
+ try:
+ contents = read_text_file(path)
+ contents = re.sub(r'''^!coverage.py: This is a private format, don't read it directly!''', '', contents)
+ data = json.loads(contents)
+ arcs: PythonArcs = {filename: [t.cast(tuple[int, int], tuple(arc)) for arc in arc_list] for filename, arc_list in data['arcs'].items()}
+ except Exception as ex:
+ raise CoverageError(path, f'Error reading JSON coverage file: {ex}') from ex
+
+ return arcs
+
+
+def enumerate_powershell_lines(
+ path: str,
+ collection_search_re: t.Optional[t.Pattern],
+ collection_sub_re: t.Optional[t.Pattern],
+) -> c.Generator[tuple[str, dict[int, int]], None, None]:
+ """Enumerate PowerShell code coverage lines in the given file."""
+ if os.path.getsize(path) == 0:
+ display.warning('Empty coverage file: %s' % path, verbosity=2)
+ return
+
+ try:
+ coverage_run = read_json_file(path)
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ display.error('%s' % ex)
+ return
+
+ for filename, hits in coverage_run.items():
+ filename = sanitize_filename(filename, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re)
+
+ if not filename:
+ continue
+
+ if isinstance(hits, dict) and not hits.get('Line'):
+ # Input data was previously aggregated and thus uses the standard ansible-test output format for PowerShell coverage.
+ # This format differs from the more verbose format of raw coverage data from the remote Windows hosts.
+ hits = dict((int(key), value) for key, value in hits.items())
+
+ yield filename, hits
+ continue
+
+ # PowerShell unpacks arrays if there's only a single entry so this is a defensive check on that
+ if not isinstance(hits, list):
+ hits = [hits]
+
+ hits = dict((hit['Line'], hit['HitCount']) for hit in hits if hit)
+
+ yield filename, hits
+
+
+def sanitize_filename(
+ filename: str,
+ modules: t.Optional[dict[str, str]] = None,
+ collection_search_re: t.Optional[t.Pattern] = None,
+ collection_sub_re: t.Optional[t.Pattern] = None,
+) -> t.Optional[str]:
+ """Convert the given code coverage path to a local absolute path and return its, or None if the path is not valid."""
+ ansible_path = os.path.abspath('lib/ansible/') + '/'
+ root_path = data_context().content.root + '/'
+ integration_temp_path = os.path.sep + os.path.join(ResultType.TMP.relative_path, 'integration') + os.path.sep
+
+ if modules is None:
+ modules = {}
+
+ if '/ansible_modlib.zip/ansible/' in filename:
+ # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.6 and earlier.
+ new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif collection_search_re and collection_search_re.search(filename):
+ new_name = os.path.abspath(collection_sub_re.sub('', filename))
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search(r'/ansible_[^/]+_payload\.zip/ansible/', filename):
+ # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.7 and later.
+ new_name = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', ansible_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif '/ansible_module_' in filename:
+ # Rewrite the module path from the remote host to match the controller. Ansible 2.6 and earlier.
+ module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)
+ if module_name not in modules:
+ display.warning('Skipping coverage of unknown module: %s' % module_name)
+ return None
+ new_name = os.path.abspath(modules[module_name])
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search(r'/ansible_[^/]+_payload(_[^/]+|\.zip)/__main__\.py$', filename):
+ # Rewrite the module path from the remote host to match the controller. Ansible 2.7 and later.
+ # AnsiballZ versions using zipimporter will match the `.zip` portion of the regex.
+ # AnsiballZ versions not using zipimporter will match the `_[^/]+` portion of the regex.
+ module_name = re.sub(r'^.*/ansible_(?P<module>[^/]+)_payload(_[^/]+|\.zip)/__main__\.py$',
+ '\\g<module>', filename).rstrip('_')
+ if module_name not in modules:
+ display.warning('Skipping coverage of unknown module: %s' % module_name)
+ return None
+ new_name = os.path.abspath(modules[module_name])
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search('^(/.*?)?/root/ansible/', filename):
+ # Rewrite the path of code running on a remote host or in a docker container as root.
+ new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif integration_temp_path in filename:
+ # Rewrite the path of code running from an integration test temporary directory.
+ new_name = re.sub(r'^.*' + re.escape(integration_temp_path) + '[^/]+/', root_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+
+ filename = os.path.abspath(filename) # make sure path is absolute (will be relative if previously exported)
+
+ return filename
+
+
+class PathChecker:
+ """Checks code coverage paths to verify they are valid and reports on the findings."""
+ def __init__(self, args: CoverageConfig, collection_search_re: t.Optional[t.Pattern] = None) -> None:
+ self.args = args
+ self.collection_search_re = collection_search_re
+ self.invalid_paths: list[str] = []
+ self.invalid_path_chars = 0
+
+ def check_path(self, path: str) -> bool:
+ """Return True if the given coverage path is valid, otherwise display a warning and return False."""
+ if os.path.isfile(to_bytes(path)):
+ return True
+
+ if self.collection_search_re and self.collection_search_re.search(path) and os.path.basename(path) == '__init__.py':
+ # the collection loader uses implicit namespace packages, so __init__.py does not need to exist on disk
+ # coverage is still reported for these non-existent files, but warnings are not needed
+ return False
+
+ self.invalid_paths.append(path)
+ self.invalid_path_chars += len(path)
+
+ if self.args.verbosity > 1:
+ display.warning('Invalid coverage path: %s' % path)
+
+ return False
+
+ def report(self) -> None:
+ """Display a warning regarding invalid paths if any were found."""
+ if self.invalid_paths:
+ display.warning('Ignored %d characters from %d invalid coverage path(s).' % (self.invalid_path_chars, len(self.invalid_paths)))
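
The heart of sanitize_filename is a series of regex rewrites that map remote execution paths back to controller paths. A standalone demonstration of the Ansible 2.7+ module_utils rewrite, using the same pattern with a made-up remote path and a literal controller prefix:

    import re

    remote_path = '/tmp/ansible_ping_payload_x7/ansible_ping_payload.zip/ansible/module_utils/basic.py'

    # Same pattern as above; 'lib/ansible/' stands in for the computed ansible_path.
    controller_path = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', 'lib/ansible/', remote_path)

    print(controller_path)  # lib/ansible/module_utils/basic.py
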
diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py
new file mode 100644
index 0000000..37859e8
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/__init__.py
@@ -0,0 +1,17 @@
+"""Common logic for the `coverage analyze` subcommand."""
+from __future__ import annotations
+import typing as t
+
+from .. import (
+ CoverageConfig,
+)
+
+
+class CoverageAnalyzeConfig(CoverageConfig):
+ """Configuration for the `coverage analyze` command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args)
+
+ # avoid mixing log messages with file output when using `/dev/stdout` for the output file on commands
+ # this may be worth considering as the default behavior in the future, instead of being dependent on the command or options used
+ self.display_stderr = True
diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py
new file mode 100644
index 0000000..ad6cf86
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/__init__.py
@@ -0,0 +1,154 @@
+"""Analyze integration test target code coverage."""
+from __future__ import annotations
+
+import collections.abc as c
+import os
+import typing as t
+
+from .....io import (
+ read_json_file,
+ write_json_file,
+)
+
+from .....util import (
+ ApplicationError,
+ display,
+)
+
+from .. import (
+ CoverageAnalyzeConfig,
+)
+
+TargetKey = t.TypeVar('TargetKey', int, tuple[int, int])
+TFlexKey = t.TypeVar('TFlexKey', int, tuple[int, int], str)
+NamedPoints = dict[str, dict[TargetKey, set[str]]]
+IndexedPoints = dict[str, dict[TargetKey, set[int]]]
+Arcs = dict[str, dict[tuple[int, int], set[int]]]
+Lines = dict[str, dict[int, set[int]]]
+TargetIndexes = dict[str, int]
+TargetSetIndexes = dict[frozenset[int], int]
+
+
+class CoverageAnalyzeTargetsConfig(CoverageAnalyzeConfig):
+ """Configuration for the `coverage analyze targets` command."""
+
+
+def make_report(target_indexes: TargetIndexes, arcs: Arcs, lines: Lines) -> dict[str, t.Any]:
+ """Condense target indexes, arcs and lines into a compact report."""
+ set_indexes: TargetSetIndexes = {}
+ arc_refs = dict((path, dict((format_arc(arc), get_target_set_index(indexes, set_indexes)) for arc, indexes in data.items())) for path, data in arcs.items())
+ line_refs = dict((path, dict((line, get_target_set_index(indexes, set_indexes)) for line, indexes in data.items())) for path, data in lines.items())
+
+ report = dict(
+ targets=[name for name, index in sorted(target_indexes.items(), key=lambda kvp: kvp[1])],
+ target_sets=[sorted(data) for data, index in sorted(set_indexes.items(), key=lambda kvp: kvp[1])],
+ arcs=arc_refs,
+ lines=line_refs,
+ )
+
+ return report
+
+
+def load_report(report: dict[str, t.Any]) -> tuple[list[str], Arcs, Lines]:
+ """Extract target indexes, arcs and lines from an existing report."""
+ try:
+ target_indexes: list[str] = report['targets']
+ target_sets: list[list[int]] = report['target_sets']
+ arc_data: dict[str, dict[str, int]] = report['arcs']
+ line_data: dict[str, dict[int, int]] = report['lines']
+ except KeyError as ex:
+ raise ApplicationError('Document is missing key "%s".' % ex.args)
+ except TypeError:
+ raise ApplicationError('Document is type "%s" instead of "dict".' % type(report).__name__)
+
+ arcs = dict((path, dict((parse_arc(arc), set(target_sets[index])) for arc, index in data.items())) for path, data in arc_data.items())
+ lines = dict((path, dict((int(line), set(target_sets[index])) for line, index in data.items())) for path, data in line_data.items())
+
+ return target_indexes, arcs, lines
+
+
+def read_report(path: str) -> tuple[list[str], Arcs, Lines]:
+ """Read a JSON report from disk."""
+ try:
+ report = read_json_file(path)
+ except Exception as ex:
+ raise ApplicationError('File "%s" is not valid JSON: %s' % (path, ex))
+
+ try:
+ return load_report(report)
+ except ApplicationError as ex:
+ raise ApplicationError('File "%s" is not an aggregated coverage data file. %s' % (path, ex))
+
+
+def write_report(args: CoverageAnalyzeTargetsConfig, report: dict[str, t.Any], path: str) -> None:
+ """Write a JSON report to disk."""
+ if args.explain:
+ return
+
+ write_json_file(path, report, formatted=False)
+
+ display.info('Generated %d byte report with %d targets covering %d files.' % (
+ os.path.getsize(path), len(report['targets']), len(set(report['arcs'].keys()) | set(report['lines'].keys())),
+ ), verbosity=1)
+
+
+def format_line(value: int) -> str:
+ """Format line as a string."""
+ return str(value) # putting this in a function keeps both pylint and mypy happy
+
+
+def format_arc(value: tuple[int, int]) -> str:
+ """Format an arc tuple as a string."""
+ return '%d:%d' % value
+
+
+def parse_arc(value: str) -> tuple[int, int]:
+ """Parse an arc string into a tuple."""
+ first, last = tuple(map(int, value.split(':')))
+ return first, last
+
+
+def get_target_set_index(data: set[int], target_set_indexes: TargetSetIndexes) -> int:
+ """Find or add the target set in the result set and return the target set index."""
+ return target_set_indexes.setdefault(frozenset(data), len(target_set_indexes))
+
+
+def get_target_index(name: str, target_indexes: TargetIndexes) -> int:
+ """Find or add the target in the result set and return the target index."""
+ return target_indexes.setdefault(name, len(target_indexes))
+
+
+def expand_indexes(
+ source_data: IndexedPoints,
+ source_index: list[str],
+ format_func: c.Callable[[TargetKey], TFlexKey],
+) -> dict[str, dict[TFlexKey, set[str]]]:
+ """Expand indexes from the source into target names for easier processing of the data (arcs or lines)."""
+ combined_data: dict[str, dict[TFlexKey, set[str]]] = {}
+
+ for covered_path, covered_points in source_data.items():
+ combined_points = combined_data.setdefault(covered_path, {})
+
+ for covered_point, covered_target_indexes in covered_points.items():
+ combined_point = combined_points.setdefault(format_func(covered_point), set())
+
+ for covered_target_index in covered_target_indexes:
+ combined_point.add(source_index[covered_target_index])
+
+ return combined_data
+
+
+def generate_indexes(target_indexes: TargetIndexes, data: NamedPoints) -> IndexedPoints:
+ """Return an indexed version of the given data (arcs or points)."""
+ results: IndexedPoints = {}
+
+ for path, points in data.items():
+ result_points = results[path] = {}
+
+ for point, target_names in points.items():
+ result_point = result_points[point] = set()
+
+ for target_name in target_names:
+ result_point.add(get_target_index(target_name, target_indexes))
+
+ return results
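
The report format relies on two small invariants: arc strings round-trip through format_arc/parse_arc, and setdefault-based interning assigns each new target the next index while returning the existing index for repeats. Both can be checked directly:

    def format_arc(value: tuple[int, int]) -> str:
        return '%d:%d' % value


    def parse_arc(value: str) -> tuple[int, int]:
        first, last = tuple(map(int, value.split(':')))
        return first, last


    assert parse_arc(format_arc((12, 15))) == (12, 15)

    # Interning: len(target_indexes) is evaluated before insertion, so each
    # new name gets the next index and repeats keep their original index.
    target_indexes: dict[str, int] = {}
    assert target_indexes.setdefault('target_a', len(target_indexes)) == 0
    assert target_indexes.setdefault('target_b', len(target_indexes)) == 1
    assert target_indexes.setdefault('target_a', len(target_indexes)) == 0
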
diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py
new file mode 100644
index 0000000..e3782ce
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/combine.py
@@ -0,0 +1,74 @@
+"""Combine integration test target code coverage reports."""
+from __future__ import annotations
+import typing as t
+
+from .....executor import (
+ Delegate,
+)
+
+from .....provisioning import (
+ prepare_profiles,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ read_report,
+ write_report,
+)
+
+from . import (
+ Arcs,
+ IndexedPoints,
+ Lines,
+ TargetIndexes,
+)
+
+
+class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets combine` command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args)
+
+ self.input_files: list[str] = args.input_file
+ self.output_file: str = args.output_file
+
+
+def command_coverage_analyze_targets_combine(args: CoverageAnalyzeTargetsCombineConfig) -> None:
+ """Combine integration test target code coverage reports."""
+ host_state = prepare_profiles(args) # coverage analyze targets combine
+
+ if args.delegate:
+ raise Delegate(host_state=host_state)
+
+ combined_target_indexes: TargetIndexes = {}
+ combined_path_arcs: Arcs = {}
+ combined_path_lines: Lines = {}
+
+ for report_path in args.input_files:
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(report_path)
+
+ merge_indexes(covered_path_arcs, covered_targets, combined_path_arcs, combined_target_indexes)
+ merge_indexes(covered_path_lines, covered_targets, combined_path_lines, combined_target_indexes)
+
+ report = make_report(combined_target_indexes, combined_path_arcs, combined_path_lines)
+
+ write_report(args, report, args.output_file)
+
+
+def merge_indexes(
+ source_data: IndexedPoints,
+ source_index: list[str],
+ combined_data: IndexedPoints,
+ combined_index: TargetIndexes,
+) -> None:
+ """Merge indexes from the source into the combined data set (arcs or lines)."""
+ for covered_path, covered_points in source_data.items():
+ combined_points = combined_data.setdefault(covered_path, {})
+
+ for covered_point, covered_target_indexes in covered_points.items():
+ combined_point = combined_points.setdefault(covered_point, set())
+
+ for covered_target_index in covered_target_indexes:
+ combined_point.add(get_target_index(source_index[covered_target_index], combined_index))
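
merge_indexes never copies indexes directly between reports, since each input report numbers its own targets; instead every index is translated through the target name into the shared combined index. A toy walkthrough of that remapping:

    # Report A indexes its targets as ['setup', 'ping']; report B as ['ping'].
    combined_index: dict[str, int] = {}


    def remap(source_index: list[str], covered: set[int]) -> set[int]:
        """Translate per-report indexes into combined indexes by target name."""
        return {combined_index.setdefault(source_index[i], len(combined_index)) for i in sorted(covered)}


    print(remap(['setup', 'ping'], {0, 1}))  # {0, 1}: 'setup' -> 0, 'ping' -> 1
    print(remap(['ping'], {0}))              # {1}: 'ping' was already interned as 1
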
diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py
new file mode 100644
index 0000000..ba90387
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/expand.py
@@ -0,0 +1,51 @@
+"""Expand target names in an aggregated coverage file."""
+from __future__ import annotations
+import typing as t
+
+from .....io import (
+ SortedSetEncoder,
+ write_json_file,
+)
+
+from .....executor import (
+ Delegate,
+)
+
+from .....provisioning import (
+ prepare_profiles,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ expand_indexes,
+ format_arc,
+ format_line,
+ read_report,
+)
+
+
+class CoverageAnalyzeTargetsExpandConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets expand` command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args)
+
+ self.input_file: str = args.input_file
+ self.output_file: str = args.output_file
+
+
+def command_coverage_analyze_targets_expand(args: CoverageAnalyzeTargetsExpandConfig) -> None:
+ """Expand target names in an aggregated coverage file."""
+ host_state = prepare_profiles(args) # coverage analyze targets expand
+
+ if args.delegate:
+ raise Delegate(host_state=host_state)
+
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)
+
+ report = dict(
+ arcs=expand_indexes(covered_path_arcs, covered_targets, format_arc),
+ lines=expand_indexes(covered_path_lines, covered_targets, format_line),
+ )
+
+ if not args.explain:
+ write_json_file(args.output_file, report, encoder=SortedSetEncoder)
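
Expansion is the inverse of report generation: the compact integer references in arcs/lines are resolved through target_sets back into target names. A toy version of that lookup chain with made-up data:

    targets = ['setup', 'ping']                      # report['targets']
    target_sets = [[0], [0, 1]]                      # report['target_sets']
    arcs = {'lib/example.py': {'1:2': 0, '2:3': 1}}  # arc string -> target set index

    expanded = {
        path: {arc: {targets[i] for i in target_sets[set_index]} for arc, set_index in data.items()}
        for path, data in arcs.items()
    }

    print(expanded)
    # {'lib/example.py': {'1:2': {'setup'}, '2:3': {'setup', 'ping'}}} (set order may vary)
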
diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py
new file mode 100644
index 0000000..29a8ee5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/filter.py
@@ -0,0 +1,122 @@
+"""Filter an aggregated coverage file, keeping only the specified targets."""
+from __future__ import annotations
+
+import collections.abc as c
+import re
+import typing as t
+
+from .....executor import (
+ Delegate,
+)
+
+from .....provisioning import (
+ prepare_profiles,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ expand_indexes,
+ generate_indexes,
+ make_report,
+ read_report,
+ write_report,
+)
+
+from . import (
+ NamedPoints,
+ TargetKey,
+ TargetIndexes,
+)
+
+
+class CoverageAnalyzeTargetsFilterConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets filter` command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args)
+
+ self.input_file: str = args.input_file
+ self.output_file: str = args.output_file
+ self.include_targets: list[str] = args.include_targets
+ self.exclude_targets: list[str] = args.exclude_targets
+ self.include_path: t.Optional[str] = args.include_path
+ self.exclude_path: t.Optional[str] = args.exclude_path
+
+
+def command_coverage_analyze_targets_filter(args: CoverageAnalyzeTargetsFilterConfig) -> None:
+ """Filter target names in an aggregated coverage file."""
+ host_state = prepare_profiles(args) # coverage analyze targets filter
+
+ if args.delegate:
+ raise Delegate(host_state=host_state)
+
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)
+
+ def pass_target_key(value: TargetKey) -> TargetKey:
+ """Return the given target key unmodified."""
+ return value
+
+ filtered_path_arcs = expand_indexes(covered_path_arcs, covered_targets, pass_target_key)
+ filtered_path_lines = expand_indexes(covered_path_lines, covered_targets, pass_target_key)
+
+ include_targets = set(args.include_targets) if args.include_targets else None
+ exclude_targets = set(args.exclude_targets) if args.exclude_targets else None
+
+ include_path = re.compile(args.include_path) if args.include_path else None
+ exclude_path = re.compile(args.exclude_path) if args.exclude_path else None
+
+ def path_filter_func(path: str) -> bool:
+ """Return True if the given path should be included, otherwise return False."""
+ if include_path and not re.search(include_path, path):
+ return False
+
+ if exclude_path and re.search(exclude_path, path):
+ return False
+
+ return True
+
+ def target_filter_func(targets: set[str]) -> set[str]:
+ """Filter the given targets and return the result based on the defined includes and excludes."""
+ if include_targets:
+ targets &= include_targets
+
+ if exclude_targets:
+ targets -= exclude_targets
+
+ return targets
+
+ filtered_path_arcs = filter_data(filtered_path_arcs, path_filter_func, target_filter_func)
+ filtered_path_lines = filter_data(filtered_path_lines, path_filter_func, target_filter_func)
+
+ target_indexes: TargetIndexes = {}
+ indexed_path_arcs = generate_indexes(target_indexes, filtered_path_arcs)
+ indexed_path_lines = generate_indexes(target_indexes, filtered_path_lines)
+
+ report = make_report(target_indexes, indexed_path_arcs, indexed_path_lines)
+
+ write_report(args, report, args.output_file)
+
+
+def filter_data(
+ data: NamedPoints,
+ path_filter_func: c.Callable[[str], bool],
+ target_filter_func: c.Callable[[set[str]], set[str]],
+) -> NamedPoints:
+ """Filter the data set using the specified filter function."""
+ result: NamedPoints = {}
+
+ for src_path, src_points in data.items():
+ if not path_filter_func(src_path):
+ continue
+
+ dst_points = {}
+
+ for src_point, src_targets in src_points.items():
+ dst_targets = target_filter_func(src_targets)
+
+ if dst_targets:
+ dst_points[src_point] = dst_targets
+
+ if dst_points:
+ result[src_path] = dst_points
+
+ return result
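
filter_data prunes at two levels: paths rejected by the path filter are dropped outright, and points whose target sets become empty after target filtering are discarded along with any path left without points. A small demonstration with toy data:

    data = {
        'lib/keep.py': {(1, 2): {'ping', 'setup'}},
        'lib/drop.py': {(3, 4): {'ping'}},
    }


    def path_filter(path: str) -> bool:
        """Stands in for the include/exclude path regexes."""
        return 'drop' not in path


    def target_filter(targets: set[str]) -> set[str]:
        """Stands in for an '--exclude-targets ping' filter."""
        return targets - {'ping'}


    result: dict[str, dict[tuple[int, int], set[str]]] = {}

    for src_path, src_points in data.items():
        if not path_filter(src_path):
            continue  # excluded by path pattern

        dst_points = {point: kept for point, targets in src_points.items() if (kept := target_filter(targets))}

        if dst_points:  # drop paths left with no covered points
            result[src_path] = dst_points

    print(result)  # {'lib/keep.py': {(1, 2): {'setup'}}}
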
diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py
new file mode 100644
index 0000000..127b5b7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/generate.py
@@ -0,0 +1,158 @@
+"""Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
+from __future__ import annotations
+
+import os
+import typing as t
+
+from .....encoding import (
+ to_text,
+)
+
+from .....data import (
+ data_context,
+)
+
+from .....util_common import (
+ ResultType,
+)
+
+from .....executor import (
+ Delegate,
+)
+
+from .....provisioning import (
+ prepare_profiles,
+ HostState,
+)
+
+from ... import (
+ enumerate_powershell_lines,
+ enumerate_python_arcs,
+ get_collection_path_regexes,
+ get_powershell_coverage_files,
+ get_python_coverage_files,
+ get_python_modules,
+ initialize_coverage,
+ PathChecker,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ write_report,
+)
+
+from . import (
+ Arcs,
+ Lines,
+ TargetIndexes,
+)
+
+
+class CoverageAnalyzeTargetsGenerateConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets generate` command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args)
+
+ self.input_dir: str = args.input_dir or ResultType.COVERAGE.path
+ self.output_file: str = args.output_file
+
+
+def command_coverage_analyze_targets_generate(args: CoverageAnalyzeTargetsGenerateConfig) -> None:
+ """Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
+ host_state = prepare_profiles(args) # coverage analyze targets generate
+
+ if args.delegate:
+        raise Delegate(host_state=host_state)
+
+ root = data_context().content.root
+ target_indexes: TargetIndexes = {}
+ arcs = dict((os.path.relpath(path, root), data) for path, data in analyze_python_coverage(args, host_state, args.input_dir, target_indexes).items())
+ lines = dict((os.path.relpath(path, root), data) for path, data in analyze_powershell_coverage(args, args.input_dir, target_indexes).items())
+ report = make_report(target_indexes, arcs, lines)
+ write_report(args, report, args.output_file)
+
+
+def analyze_python_coverage(
+ args: CoverageAnalyzeTargetsGenerateConfig,
+ host_state: HostState,
+ path: str,
+ target_indexes: TargetIndexes,
+) -> Arcs:
+ """Analyze Python code coverage."""
+ results: Arcs = {}
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+ modules = get_python_modules()
+ python_files = get_python_coverage_files(path)
+ coverage = initialize_coverage(args, host_state)
+
+ for python_file in python_files:
+ if not is_integration_coverage_file(python_file):
+ continue
+
+ target_name = get_target_name(python_file)
+ target_index = get_target_index(target_name, target_indexes)
+
+ for filename, covered_arcs in enumerate_python_arcs(python_file, coverage, modules, collection_search_re, collection_sub_re):
+ arcs = results.setdefault(filename, {})
+
+ for covered_arc in covered_arcs:
+ arc = arcs.setdefault(covered_arc, set())
+ arc.add(target_index)
+
+ prune_invalid_filenames(args, results, collection_search_re=collection_search_re)
+
+ return results
+
+
+def analyze_powershell_coverage(
+ args: CoverageAnalyzeTargetsGenerateConfig,
+ path: str,
+ target_indexes: TargetIndexes,
+) -> Lines:
+    """Analyze PowerShell code coverage."""
+ results: Lines = {}
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+ powershell_files = get_powershell_coverage_files(path)
+
+ for powershell_file in powershell_files:
+ if not is_integration_coverage_file(powershell_file):
+ continue
+
+ target_name = get_target_name(powershell_file)
+ target_index = get_target_index(target_name, target_indexes)
+
+ for filename, hits in enumerate_powershell_lines(powershell_file, collection_search_re, collection_sub_re):
+ lines = results.setdefault(filename, {})
+
+ for covered_line in hits:
+ line = lines.setdefault(covered_line, set())
+ line.add(target_index)
+
+ prune_invalid_filenames(args, results)
+
+ return results
+
+
+def prune_invalid_filenames(
+ args: CoverageAnalyzeTargetsGenerateConfig,
+ results: dict[str, t.Any],
+ collection_search_re: t.Optional[t.Pattern] = None,
+) -> None:
+ """Remove invalid filenames from the given result set."""
+ path_checker = PathChecker(args, collection_search_re)
+
+ for path in list(results.keys()):
+ if not path_checker.check_path(path):
+ del results[path]
+
+
+def get_target_name(path: str) -> str:
+ """Extract the test target name from the given coverage path."""
+ return to_text(os.path.basename(path).split('=')[1])
+
+
+def is_integration_coverage_file(path: str) -> bool:
+    """Return True if the coverage file came from integration tests, otherwise return False."""
+ return os.path.basename(path).split('=')[0] in ('integration', 'windows-integration', 'network-integration')
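+
+
+# Example of the coverage file naming convention these helpers rely on
+# (the exact suffix shown is illustrative):
+#
+#     integration=ping=docker-default=3.10=coverage.host.1234
+#
+# is_integration_coverage_file() returns True (the 'integration' prefix) and
+# get_target_name() returns 'ping'.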
diff --git a/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py
new file mode 100644
index 0000000..c1c77e7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/analyze/targets/missing.py
@@ -0,0 +1,119 @@
+"""Identify aggregated coverage in one file missing from another."""
+from __future__ import annotations
+
+import os
+import typing as t
+
+from .....encoding import (
+ to_bytes,
+)
+
+from .....executor import (
+ Delegate,
+)
+
+from .....provisioning import (
+ prepare_profiles,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ read_report,
+ write_report,
+)
+
+from . import (
+ TargetIndexes,
+ IndexedPoints,
+)
+
+
+class CoverageAnalyzeTargetsMissingConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets missing` command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args)
+
+ self.from_file: str = args.from_file
+ self.to_file: str = args.to_file
+ self.output_file: str = args.output_file
+
+ self.only_gaps: bool = args.only_gaps
+ self.only_exists: bool = args.only_exists
+
+
+def command_coverage_analyze_targets_missing(args: CoverageAnalyzeTargetsMissingConfig) -> None:
+ """Identify aggregated coverage in one file missing from another."""
+ host_state = prepare_profiles(args) # coverage analyze targets missing
+
+ if args.delegate:
+ raise Delegate(host_state=host_state)
+
+ from_targets, from_path_arcs, from_path_lines = read_report(args.from_file)
+ to_targets, to_path_arcs, to_path_lines = read_report(args.to_file)
+ target_indexes: TargetIndexes = {}
+
+ if args.only_gaps:
+ arcs = find_gaps(from_path_arcs, from_targets, to_path_arcs, target_indexes, args.only_exists)
+ lines = find_gaps(from_path_lines, from_targets, to_path_lines, target_indexes, args.only_exists)
+ else:
+ arcs = find_missing(from_path_arcs, from_targets, to_path_arcs, to_targets, target_indexes, args.only_exists)
+ lines = find_missing(from_path_lines, from_targets, to_path_lines, to_targets, target_indexes, args.only_exists)
+
+ report = make_report(target_indexes, arcs, lines)
+ write_report(args, report, args.output_file)
+
+
+def find_gaps(
+ from_data: IndexedPoints,
+ from_index: list[str],
+ to_data: IndexedPoints,
+ target_indexes: TargetIndexes,
+ only_exists: bool,
+) -> IndexedPoints:
+ """Find gaps in coverage between the from and to data sets."""
+ target_data: IndexedPoints = {}
+
+ for from_path, from_points in from_data.items():
+ if only_exists and not os.path.isfile(to_bytes(from_path)):
+ continue
+
+ to_points = to_data.get(from_path, {})
+
+ gaps = set(from_points.keys()) - set(to_points.keys())
+
+ if gaps:
+ gap_points = dict((key, value) for key, value in from_points.items() if key in gaps)
+ target_data[from_path] = dict((gap, set(get_target_index(from_index[i], target_indexes) for i in indexes)) for gap, indexes in gap_points.items())
+
+ return target_data
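+
+
+# Minimal illustration (hypothetical values): a gap is a point present in
+# from_data but absent from to_data for the same path.
+#
+#     from_index = ['target_a']
+#     from_data = {'demo.py': {(1, 2): {0}}}   # covered by from_index[0]
+#     to_data = {'demo.py': {}}
+#
+#     find_gaps(from_data, from_index, to_data, {}, only_exists=False)
+#     # -> {'demo.py': {(1, 2): {0}}}, with 'target_a' re-indexed via target_indexes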
+
+
+def find_missing(
+ from_data: IndexedPoints,
+ from_index: list[str],
+ to_data: IndexedPoints,
+ to_index: list[str],
+ target_indexes: TargetIndexes,
+ only_exists: bool,
+) -> IndexedPoints:
+ """Find coverage in from_data not present in to_data (arcs or lines)."""
+ target_data: IndexedPoints = {}
+
+ for from_path, from_points in from_data.items():
+ if only_exists and not os.path.isfile(to_bytes(from_path)):
+ continue
+
+ to_points = to_data.get(from_path, {})
+
+ for from_point, from_target_indexes in from_points.items():
+ to_target_indexes = to_points.get(from_point, set())
+
+ remaining_targets = set(from_index[i] for i in from_target_indexes) - set(to_index[i] for i in to_target_indexes)
+
+ if remaining_targets:
+ target_index = target_data.setdefault(from_path, {}).setdefault(from_point, set())
+ target_index.update(get_target_index(name, target_indexes) for name in remaining_targets)
+
+ return target_data
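+
+
+# Minimal illustration (hypothetical values): unlike find_gaps, find_missing
+# compares the target sets point by point, so a point covered in both files is
+# still reported when from_data covers it with an extra target.
+#
+#     from_index = to_index = ['target_a', 'target_b']
+#     from_data = {'demo.py': {(1, 2): {0, 1}}}
+#     to_data = {'demo.py': {(1, 2): {0}}}
+#
+#     # -> {'demo.py': {(1, 2): {<index of 'target_b' in target_indexes>}}}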
diff --git a/test/lib/ansible_test/_internal/commands/coverage/combine.py b/test/lib/ansible_test/_internal/commands/coverage/combine.py
new file mode 100644
index 0000000..66210c7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/combine.py
@@ -0,0 +1,362 @@
+"""Combine code coverage files."""
+from __future__ import annotations
+
+import collections.abc as c
+import os
+import json
+import typing as t
+
+from ...target import (
+ walk_compile_targets,
+ walk_powershell_targets,
+)
+
+from ...io import (
+ read_text_file,
+)
+
+from ...util import (
+ ANSIBLE_TEST_TOOLS_ROOT,
+ display,
+ ApplicationError,
+ raw_command,
+)
+
+from ...util_common import (
+ ResultType,
+ write_json_file,
+ write_json_test_results,
+)
+
+from ...executor import (
+ Delegate,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...host_configs import (
+ DockerConfig,
+ RemoteConfig,
+)
+
+from ...provisioning import (
+ HostState,
+ prepare_profiles,
+)
+
+from . import (
+ enumerate_python_arcs,
+ enumerate_powershell_lines,
+ get_collection_path_regexes,
+ get_all_coverage_files,
+ get_python_coverage_files,
+ get_python_modules,
+ get_powershell_coverage_files,
+ initialize_coverage,
+ COVERAGE_OUTPUT_FILE_NAME,
+ COVERAGE_GROUPS,
+ CoverageConfig,
+ PathChecker,
+)
+
+TValue = t.TypeVar('TValue')
+
+
+def command_coverage_combine(args: CoverageCombineConfig) -> None:
+ """Patch paths in coverage files and merge into a single file."""
+ host_state = prepare_profiles(args) # coverage combine
+ combine_coverage_files(args, host_state)
+
+
+def combine_coverage_files(args: CoverageCombineConfig, host_state: HostState) -> list[str]:
+ """Combine coverage and return a list of the resulting files."""
+ if args.delegate:
+ if isinstance(args.controller, (DockerConfig, RemoteConfig)):
+ paths = get_all_coverage_files()
+ exported_paths = [path for path in paths if os.path.basename(path).split('=')[-1].split('.')[:2] == ['coverage', 'combined']]
+
+ if not exported_paths:
+ raise ExportedCoverageDataNotFound()
+
+ pairs = [(path, os.path.relpath(path, data_context().content.root)) for path in exported_paths]
+
+ def coverage_callback(files: list[tuple[str, str]]) -> None:
+ """Add the coverage files to the payload file list."""
+ display.info('Including %d exported coverage file(s) in payload.' % len(pairs), verbosity=1)
+ files.extend(pairs)
+
+ data_context().register_payload_callback(coverage_callback)
+
+ raise Delegate(host_state=host_state)
+
+ paths = _command_coverage_combine_powershell(args) + _command_coverage_combine_python(args, host_state)
+
+ for path in paths:
+ display.info('Generated combined output: %s' % path, verbosity=1)
+
+ return paths
+
+
+class ExportedCoverageDataNotFound(ApplicationError):
+    """Exception raised when combined coverage data is required but not present."""
+ def __init__(self) -> None:
+ super().__init__(
+ 'Coverage data must be exported before processing with the `--docker` or `--remote` option.\n'
+ 'Export coverage with `ansible-test coverage combine` using the `--export` option.\n'
+ 'The exported files must be in the directory: %s/' % ResultType.COVERAGE.relative_path)
+
+
+def _command_coverage_combine_python(args: CoverageCombineConfig, host_state: HostState) -> list[str]:
+ """Combine Python coverage files and return a list of the output files."""
+ coverage = initialize_coverage(args, host_state)
+
+ modules = get_python_modules()
+
+ coverage_files = get_python_coverage_files()
+
+ def _default_stub_value(source_paths: list[str]) -> dict[str, set[tuple[int, int]]]:
+ return {path: set() for path in source_paths}
+
+ counter = 0
+ sources = _get_coverage_targets(args, walk_compile_targets)
+ groups = _build_stub_groups(args, sources, _default_stub_value)
+
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+
+ for coverage_file in coverage_files:
+ counter += 1
+ display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
+
+ group = get_coverage_group(args, coverage_file)
+
+ if group is None:
+ display.warning('Unexpected name for coverage file: %s' % coverage_file)
+ continue
+
+ for filename, arcs in enumerate_python_arcs(coverage_file, coverage, modules, collection_search_re, collection_sub_re):
+ if args.export:
+ filename = os.path.relpath(filename) # exported paths must be relative since absolute paths may differ between systems
+
+ if group not in groups:
+ groups[group] = {}
+
+ arc_data = groups[group]
+
+ if filename not in arc_data:
+ arc_data[filename] = set()
+
+ arc_data[filename].update(arcs)
+
+ output_files = []
+
+ if args.export:
+ coverage_file = os.path.join(args.export, '')
+ suffix = '=coverage.combined'
+ else:
+ coverage_file = os.path.join(ResultType.COVERAGE.path, COVERAGE_OUTPUT_FILE_NAME)
+ suffix = ''
+
+ path_checker = PathChecker(args, collection_search_re)
+
+ for group in sorted(groups):
+ arc_data = groups[group]
+ output_file = coverage_file + group + suffix
+
+ if args.explain:
+ continue
+
+ updated = coverage.CoverageData(output_file)
+
+ for filename in arc_data:
+ if not path_checker.check_path(filename):
+ continue
+
+ updated.add_arcs({filename: list(arc_data[filename])})
+
+ if args.all:
+ updated.add_arcs(dict((source[0], []) for source in sources))
+
+ updated.write() # always write files to make sure stale files do not exist
+
+ if updated:
+ # only report files which are non-empty to prevent coverage from reporting errors
+ output_files.append(output_file)
+
+ path_checker.report()
+
+ return sorted(output_files)
+
+
+def _command_coverage_combine_powershell(args: CoverageCombineConfig) -> list[str]:
+ """Combine PowerShell coverage files and return a list of the output files."""
+ coverage_files = get_powershell_coverage_files()
+
+ def _default_stub_value(source_paths: list[str]) -> dict[str, dict[int, int]]:
+ cmd = ['pwsh', os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'coverage_stub.ps1')]
+ cmd.extend(source_paths)
+
+ stubs = json.loads(raw_command(cmd, capture=True)[0])
+
+ return dict((d['Path'], dict((line, 0) for line in d['Lines'])) for d in stubs)
+
+ counter = 0
+ sources = _get_coverage_targets(args, walk_powershell_targets)
+ groups = _build_stub_groups(args, sources, _default_stub_value)
+
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+
+ for coverage_file in coverage_files:
+ counter += 1
+ display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
+
+ group = get_coverage_group(args, coverage_file)
+
+ if group is None:
+ display.warning('Unexpected name for coverage file: %s' % coverage_file)
+ continue
+
+ for filename, hits in enumerate_powershell_lines(coverage_file, collection_search_re, collection_sub_re):
+ if args.export:
+ filename = os.path.relpath(filename) # exported paths must be relative since absolute paths may differ between systems
+
+ if group not in groups:
+ groups[group] = {}
+
+ coverage_data = groups[group]
+
+ if filename not in coverage_data:
+ coverage_data[filename] = {}
+
+ file_coverage = coverage_data[filename]
+
+ for line_no, hit_count in hits.items():
+ file_coverage[line_no] = file_coverage.get(line_no, 0) + hit_count
+
+ output_files = []
+
+ path_checker = PathChecker(args)
+
+ for group in sorted(groups):
+ coverage_data = dict((filename, data) for filename, data in groups[group].items() if path_checker.check_path(filename))
+
+ if args.all:
+ missing_sources = [source for source, _source_line_count in sources if source not in coverage_data]
+ coverage_data.update(_default_stub_value(missing_sources))
+
+ if not args.explain:
+ if args.export:
+ output_file = os.path.join(args.export, group + '=coverage.combined')
+ write_json_file(output_file, coverage_data, formatted=False)
+ output_files.append(output_file)
+ continue
+
+ output_file = COVERAGE_OUTPUT_FILE_NAME + group + '-powershell'
+
+ write_json_test_results(ResultType.COVERAGE, output_file, coverage_data, formatted=False)
+
+ output_files.append(os.path.join(ResultType.COVERAGE.path, output_file))
+
+ path_checker.report()
+
+ return sorted(output_files)
+
+
+def _get_coverage_targets(args: CoverageCombineConfig, walk_func: c.Callable) -> list[tuple[str, int]]:
+ """Return a list of files to cover and the number of lines in each file, using the given function as the source of the files."""
+ sources = []
+
+ if args.all or args.stub:
+ # excludes symlinks of regular files to avoid reporting on the same file multiple times
+ # in the future it would be nice to merge any coverage for symlinks into the real files
+ for target in walk_func(include_symlinks=False):
+ target_path = os.path.abspath(target.path)
+
+ target_lines = len(read_text_file(target_path).splitlines())
+
+ sources.append((target_path, target_lines))
+
+ sources.sort()
+
+ return sources
+
+
+def _build_stub_groups(
+ args: CoverageCombineConfig,
+ sources: list[tuple[str, int]],
+ default_stub_value: c.Callable[[list[str]], dict[str, TValue]],
+) -> dict[str, dict[str, TValue]]:
+ """
+ Split the given list of sources with line counts into groups, maintaining a maximum line count for each group.
+ Each group consists of a dictionary of sources and default coverage stubs generated by the provided default_stub_value function.
+ """
+ groups = {}
+
+ if args.stub:
+ stub_group: list[str] = []
+ stub_groups = [stub_group]
+ stub_line_limit = 500000
+ stub_line_count = 0
+
+ for source, source_line_count in sources:
+ stub_group.append(source)
+ stub_line_count += source_line_count
+
+ if stub_line_count > stub_line_limit:
+ stub_line_count = 0
+ stub_group = []
+ stub_groups.append(stub_group)
+
+ for stub_index, stub_group in enumerate(stub_groups):
+ if not stub_group:
+ continue
+
+ groups['=stub-%02d' % (stub_index + 1)] = default_stub_value(stub_group)
+
+ return groups
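+
+
+# Illustrative trace (hypothetical line counts): with args.stub set and the
+# 500,000 line limit above, sources of
+#
+#     [('a.py', 300000), ('b.py', 300000), ('c.py', 100)]
+#
+# produce '=stub-01' with stubs for a.py and b.py (the limit is crossed after
+# b.py, which starts a new group) and '=stub-02' with stubs for c.py.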
+
+
+def get_coverage_group(args: CoverageCombineConfig, coverage_file: str) -> t.Optional[str]:
+ """Return the name of the coverage group for the specified coverage file, or None if no group was found."""
+ parts = os.path.basename(coverage_file).split('=', 4)
+
+ if len(parts) != 5 or not parts[4].startswith('coverage.'):
+ return None
+
+ names = dict(
+ command=parts[0],
+ target=parts[1],
+ environment=parts[2],
+ version=parts[3],
+ )
+
+ export_names = dict(
+ version=parts[3],
+ )
+
+ group = ''
+
+ for part in COVERAGE_GROUPS:
+ if part in args.group_by:
+ group += '=%s' % names[part]
+ elif args.export:
+ group += '=%s' % export_names.get(part, 'various')
+
+ if args.export:
+ group = group.lstrip('=')
+
+ return group
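+
+
+# Example (hypothetical file name, and assuming COVERAGE_GROUPS orders the
+# parts as command, target, environment, version): for
+#
+#     integration=ping=docker-default=3.10=coverage.host.1234
+#
+# with --group-by command --group-by version the group is '=integration=3.10',
+# while with no grouping (and no --export) the group is ''.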
+
+
+class CoverageCombineConfig(CoverageConfig):
+ """Configuration for the coverage combine command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args)
+
+ self.group_by: frozenset[str] = frozenset(args.group_by) if args.group_by else frozenset()
+ self.all: bool = args.all
+ self.stub: bool = args.stub
+
+ # only available to coverage combine
+        self.export: t.Optional[str] = args.export if 'export' in args else None
diff --git a/test/lib/ansible_test/_internal/commands/coverage/erase.py b/test/lib/ansible_test/_internal/commands/coverage/erase.py
new file mode 100644
index 0000000..70b685c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/erase.py
@@ -0,0 +1,43 @@
+"""Erase code coverage files."""
+from __future__ import annotations
+
+import os
+
+from ...util_common import (
+ ResultType,
+)
+
+from ...executor import (
+ Delegate,
+)
+
+from ...provisioning import (
+ prepare_profiles,
+)
+
+from . import (
+ CoverageConfig,
+)
+
+
+def command_coverage_erase(args: CoverageEraseConfig) -> None:
+ """Erase code coverage data files collected during test runs."""
+ host_state = prepare_profiles(args) # coverage erase
+
+ if args.delegate:
+ raise Delegate(host_state=host_state)
+
+ coverage_dir = ResultType.COVERAGE.path
+
+ for name in os.listdir(coverage_dir):
+ if not name.startswith('coverage') and '=coverage.' not in name:
+ continue
+
+ path = os.path.join(coverage_dir, name)
+
+ if not args.explain:
+ os.remove(path)
+
+
+class CoverageEraseConfig(CoverageConfig):
+ """Configuration for the coverage erase command."""
diff --git a/test/lib/ansible_test/_internal/commands/coverage/html.py b/test/lib/ansible_test/_internal/commands/coverage/html.py
new file mode 100644
index 0000000..e3063c0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/html.py
@@ -0,0 +1,51 @@
+"""Generate HTML code coverage reports."""
+from __future__ import annotations
+
+import os
+
+from ...io import (
+ make_dirs,
+)
+
+from ...util import (
+ display,
+)
+
+from ...util_common import (
+ ResultType,
+)
+
+from ...provisioning import (
+ prepare_profiles,
+)
+
+from .combine import (
+ combine_coverage_files,
+ CoverageCombineConfig,
+)
+
+from . import (
+ run_coverage,
+)
+
+
+def command_coverage_html(args: CoverageHtmlConfig) -> None:
+ """Generate an HTML coverage report."""
+ host_state = prepare_profiles(args) # coverage html
+ output_files = combine_coverage_files(args, host_state)
+
+ for output_file in output_files:
+ if output_file.endswith('-powershell'):
+            # coverage.py does not support non-Python files, so skip the local html report.
+ display.info("Skipping output file %s in html generation" % output_file, verbosity=3)
+ continue
+
+ dir_name = os.path.join(ResultType.REPORTS.path, os.path.basename(output_file))
+ make_dirs(dir_name)
+ run_coverage(args, host_state, output_file, 'html', ['-i', '-d', dir_name])
+
+ display.info('HTML report generated: file:///%s' % os.path.join(dir_name, 'index.html'))
+
+
+class CoverageHtmlConfig(CoverageCombineConfig):
+ """Configuration for the coverage html command."""
diff --git a/test/lib/ansible_test/_internal/commands/coverage/report.py b/test/lib/ansible_test/_internal/commands/coverage/report.py
new file mode 100644
index 0000000..fadc13f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/report.py
@@ -0,0 +1,152 @@
+"""Generate console code coverage reports."""
+from __future__ import annotations
+
+import os
+import typing as t
+
+from ...io import (
+ read_json_file,
+)
+
+from ...util import (
+ display,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...provisioning import (
+ prepare_profiles,
+)
+
+from .combine import (
+ combine_coverage_files,
+ CoverageCombineConfig,
+)
+
+from . import (
+ run_coverage,
+)
+
+
+def command_coverage_report(args: CoverageReportConfig) -> None:
+ """Generate a console coverage report."""
+ host_state = prepare_profiles(args) # coverage report
+ output_files = combine_coverage_files(args, host_state)
+
+ for output_file in output_files:
+ if args.group_by or args.stub:
+ display.info('>>> Coverage Group: %s' % ' '.join(os.path.basename(output_file).split('=')[1:]))
+
+ if output_file.endswith('-powershell'):
+ display.info(_generate_powershell_output_report(args, output_file))
+ else:
+ options = []
+
+ if args.show_missing:
+ options.append('--show-missing')
+
+ if args.include:
+ options.extend(['--include', args.include])
+
+ if args.omit:
+ options.extend(['--omit', args.omit])
+
+ run_coverage(args, host_state, output_file, 'report', options)
+
+
+def _generate_powershell_output_report(args: CoverageReportConfig, coverage_file: str) -> str:
+ """Generate and return a PowerShell coverage report for the given coverage file."""
+ coverage_info = read_json_file(coverage_file)
+
+ root_path = data_context().content.root + '/'
+
+ name_padding = 7
+ cover_padding = 8
+
+ file_report = []
+ total_stmts = 0
+ total_miss = 0
+
+ for filename in sorted(coverage_info.keys()):
+ hit_info = coverage_info[filename]
+
+ if filename.startswith(root_path):
+ filename = filename[len(root_path):]
+
+ if args.omit and filename in args.omit:
+ continue
+ if args.include and filename not in args.include:
+ continue
+
+ stmts = len(hit_info)
+ miss = len([hit for hit in hit_info.values() if hit == 0])
+
+ name_padding = max(name_padding, len(filename) + 3)
+
+ total_stmts += stmts
+ total_miss += miss
+
+ cover = "{0}%".format(int((stmts - miss) / stmts * 100))
+
+ missing = []
+ current_missing = None
+ sorted_lines = sorted([int(x) for x in hit_info.keys()])
+ for idx, line in enumerate(sorted_lines):
+ hit = hit_info[str(line)]
+ if hit == 0 and current_missing is None:
+ current_missing = line
+ elif hit != 0 and current_missing is not None:
+ end_line = sorted_lines[idx - 1]
+ if current_missing == end_line:
+ missing.append(str(current_missing))
+ else:
+ missing.append('%s-%s' % (current_missing, end_line))
+ current_missing = None
+
+ if current_missing is not None:
+ end_line = sorted_lines[-1]
+ if current_missing == end_line:
+ missing.append(str(current_missing))
+ else:
+ missing.append('%s-%s' % (current_missing, end_line))
+
+ file_report.append({'name': filename, 'stmts': stmts, 'miss': miss, 'cover': cover, 'missing': missing})
+
+ if total_stmts == 0:
+ return ''
+
+ total_percent = '{0}%'.format(int((total_stmts - total_miss) / total_stmts * 100))
+ stmts_padding = max(8, len(str(total_stmts)))
+ miss_padding = max(7, len(str(total_miss)))
+
+ line_length = name_padding + stmts_padding + miss_padding + cover_padding
+
+ header = 'Name'.ljust(name_padding) + 'Stmts'.rjust(stmts_padding) + 'Miss'.rjust(miss_padding) + \
+ 'Cover'.rjust(cover_padding)
+
+ if args.show_missing:
+ header += 'Lines Missing'.rjust(16)
+ line_length += 16
+
+ line_break = '-' * line_length
+ lines = ['%s%s%s%s%s' % (f['name'].ljust(name_padding), str(f['stmts']).rjust(stmts_padding),
+ str(f['miss']).rjust(miss_padding), f['cover'].rjust(cover_padding),
+ ' ' + ', '.join(f['missing']) if args.show_missing else '')
+ for f in file_report]
+ totals = 'TOTAL'.ljust(name_padding) + str(total_stmts).rjust(stmts_padding) + \
+ str(total_miss).rjust(miss_padding) + total_percent.rjust(cover_padding)
+
+ report = '{0}\n{1}\n{2}\n{1}\n{3}'.format(header, line_break, "\n".join(lines), totals)
+ return report
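+
+
+# The missing-line bookkeeping in _generate_powershell_output_report compresses
+# consecutive uncovered lines into ranges. Illustrative input (hypothetical hit
+# counts, keyed by line number as in the JSON coverage data):
+#
+#     hit_info = {'1': 1, '2': 0, '3': 0, '4': 1, '5': 0}
+#
+# yields missing == ['2-3', '5'].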
+
+
+class CoverageReportConfig(CoverageCombineConfig):
+ """Configuration for the coverage report command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args)
+
+ self.show_missing: bool = args.show_missing
+ self.include: str = args.include
+ self.omit: str = args.omit
diff --git a/test/lib/ansible_test/_internal/commands/coverage/xml.py b/test/lib/ansible_test/_internal/commands/coverage/xml.py
new file mode 100644
index 0000000..243c9a9
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/coverage/xml.py
@@ -0,0 +1,189 @@
+"""Generate XML code coverage reports."""
+from __future__ import annotations
+
+import os
+import time
+
+from xml.etree.ElementTree import (
+ Comment,
+ Element,
+ SubElement,
+ tostring,
+)
+
+from xml.dom import (
+ minidom,
+)
+
+from ...io import (
+ make_dirs,
+ read_json_file,
+)
+
+from ...util_common import (
+ ResultType,
+ write_text_test_results,
+)
+
+from ...util import (
+ get_ansible_version,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...provisioning import (
+ prepare_profiles,
+)
+
+from .combine import (
+ combine_coverage_files,
+ CoverageCombineConfig,
+)
+
+from . import (
+ run_coverage,
+)
+
+
+def command_coverage_xml(args: CoverageXmlConfig) -> None:
+ """Generate an XML coverage report."""
+ host_state = prepare_profiles(args) # coverage xml
+ output_files = combine_coverage_files(args, host_state)
+
+ for output_file in output_files:
+ xml_name = '%s.xml' % os.path.basename(output_file)
+ if output_file.endswith('-powershell'):
+ report = _generate_powershell_xml(output_file)
+
+ rough_string = tostring(report, 'utf-8')
+ reparsed = minidom.parseString(rough_string)
+ pretty = reparsed.toprettyxml(indent=' ')
+
+ write_text_test_results(ResultType.REPORTS, xml_name, pretty)
+ else:
+ xml_path = os.path.join(ResultType.REPORTS.path, xml_name)
+ make_dirs(ResultType.REPORTS.path)
+ run_coverage(args, host_state, output_file, 'xml', ['-i', '-o', xml_path])
+
+
+def _generate_powershell_xml(coverage_file: str) -> Element:
+ """Generate a PowerShell coverage report XML element from the specified coverage file and return it."""
+ coverage_info = read_json_file(coverage_file)
+
+ content_root = data_context().content.root
+ is_ansible = data_context().content.is_ansible
+
+ packages: dict[str, dict[str, dict[str, int]]] = {}
+ for path, results in coverage_info.items():
+ filename = os.path.splitext(os.path.basename(path))[0]
+
+ if filename.startswith('Ansible.ModuleUtils'):
+ package = 'ansible.module_utils'
+ elif is_ansible:
+ package = 'ansible.modules'
+ else:
+ rel_path = path[len(content_root) + 1:]
+ plugin_type = "modules" if rel_path.startswith("plugins/modules") else "module_utils"
+ package = 'ansible_collections.%splugins.%s' % (data_context().content.collection.prefix, plugin_type)
+
+ if package not in packages:
+ packages[package] = {}
+
+ packages[package][path] = results
+
+ elem_coverage = Element('coverage')
+ elem_coverage.append(
+ Comment(' Generated by ansible-test from the Ansible project: https://www.ansible.com/ '))
+ elem_coverage.append(
+ Comment(' Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd '))
+
+ elem_sources = SubElement(elem_coverage, 'sources')
+
+ elem_source = SubElement(elem_sources, 'source')
+ elem_source.text = data_context().content.root
+
+ elem_packages = SubElement(elem_coverage, 'packages')
+
+ total_lines_hit = 0
+ total_line_count = 0
+
+ for package_name, package_data in packages.items():
+ lines_hit, line_count = _add_cobertura_package(elem_packages, package_name, package_data)
+
+ total_lines_hit += lines_hit
+ total_line_count += line_count
+
+ elem_coverage.attrib.update({
+ 'branch-rate': '0',
+ 'branches-covered': '0',
+ 'branches-valid': '0',
+ 'complexity': '0',
+ 'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
+        'lines-covered': str(total_lines_hit),
+        'lines-valid': str(total_line_count),
+ 'timestamp': str(int(time.time())),
+ 'version': get_ansible_version(),
+ })
+
+ return elem_coverage
+
+
+def _add_cobertura_package(packages: Element, package_name: str, package_data: dict[str, dict[str, int]]) -> tuple[int, int]:
+ """Add a package element to the given packages element."""
+ elem_package = SubElement(packages, 'package')
+ elem_classes = SubElement(elem_package, 'classes')
+
+ total_lines_hit = 0
+ total_line_count = 0
+
+ for path, results in package_data.items():
+ lines_hit = len([True for hits in results.values() if hits])
+ line_count = len(results)
+
+ total_lines_hit += lines_hit
+ total_line_count += line_count
+
+ elem_class = SubElement(elem_classes, 'class')
+
+ class_name = os.path.splitext(os.path.basename(path))[0]
+ if class_name.startswith("Ansible.ModuleUtils"):
+ class_name = class_name[20:]
+
+ content_root = data_context().content.root
+ filename = path
+ if filename.startswith(content_root):
+ filename = filename[len(content_root) + 1:]
+
+ elem_class.attrib.update({
+ 'branch-rate': '0',
+ 'complexity': '0',
+ 'filename': filename,
+ 'line-rate': str(round(lines_hit / line_count, 4)) if line_count else "0",
+ 'name': class_name,
+ })
+
+ SubElement(elem_class, 'methods')
+
+ elem_lines = SubElement(elem_class, 'lines')
+
+ for number, hits in results.items():
+ elem_line = SubElement(elem_lines, 'line')
+ elem_line.attrib.update(
+ hits=str(hits),
+ number=str(number),
+ )
+
+ elem_package.attrib.update({
+ 'branch-rate': '0',
+ 'complexity': '0',
+ 'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
+ 'name': package_name,
+ })
+
+ return total_lines_hit, total_line_count
+
+
+class CoverageXmlConfig(CoverageCombineConfig):
+ """Configuration for the coverage xml command."""
diff --git a/test/lib/ansible_test/_internal/commands/env/__init__.py b/test/lib/ansible_test/_internal/commands/env/__init__.py
new file mode 100644
index 0000000..44f229f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/env/__init__.py
@@ -0,0 +1,197 @@
+"""Show information about the test environment."""
+from __future__ import annotations
+
+import datetime
+import os
+import platform
+import sys
+import typing as t
+
+from ...config import (
+ CommonConfig,
+)
+
+from ...io import (
+ write_json_file,
+)
+
+from ...util import (
+ display,
+ get_ansible_version,
+ get_available_python_versions,
+ ApplicationError,
+)
+
+from ...util_common import (
+ data_context,
+ write_json_test_results,
+ ResultType,
+)
+
+from ...docker_util import (
+ get_docker_command,
+ get_docker_info,
+ get_docker_container_id,
+)
+
+from ...constants import (
+ TIMEOUT_PATH,
+)
+
+from ...ci import (
+ get_ci_provider,
+)
+
+
+class EnvConfig(CommonConfig):
+ """Configuration for the `env` command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args, 'env')
+
+ self.show = args.show
+ self.dump = args.dump
+ self.timeout = args.timeout
+ self.list_files = args.list_files
+
+ if not self.show and not self.dump and self.timeout is None and not self.list_files:
+ # default to --show if no options were given
+ self.show = True
+
+
+def command_env(args: EnvConfig) -> None:
+ """Entry point for the `env` command."""
+ show_dump_env(args)
+ list_files_env(args)
+ set_timeout(args)
+
+
+def show_dump_env(args: EnvConfig) -> None:
+ """Show information about the current environment and/or write the information to disk."""
+ if not args.show and not args.dump:
+ return
+
+ container_id = get_docker_container_id()
+
+ data = dict(
+ ansible=dict(
+ version=get_ansible_version(),
+ ),
+ docker=get_docker_details(args),
+ container_id=container_id,
+ environ=os.environ.copy(),
+ location=dict(
+ pwd=os.environ.get('PWD', None),
+ cwd=os.getcwd(),
+ ),
+ git=get_ci_provider().get_git_details(args),
+ platform=dict(
+ datetime=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
+ platform=platform.platform(),
+ uname=platform.uname(),
+ ),
+ python=dict(
+ executable=sys.executable,
+ version=platform.python_version(),
+ ),
+ interpreters=get_available_python_versions(),
+ )
+
+ if args.show:
+ verbose = {
+ 'docker': 3,
+ 'docker.executable': 0,
+ 'environ': 2,
+ 'platform.uname': 1,
+ }
+
+ show_dict(data, verbose)
+
+ if args.dump and not args.explain:
+ write_json_test_results(ResultType.BOT, 'data-environment.json', data)
+
+
+def list_files_env(args: EnvConfig) -> None:
+ """List files on stdout."""
+ if not args.list_files:
+ return
+
+ for path in data_context().content.all_files():
+ display.info(path)
+
+
+def set_timeout(args: EnvConfig) -> None:
+ """Set an execution timeout for subsequent ansible-test invocations."""
+ if args.timeout is None:
+ return
+
+ if args.timeout:
+ deadline = (datetime.datetime.utcnow() + datetime.timedelta(minutes=args.timeout)).strftime('%Y-%m-%dT%H:%M:%SZ')
+
+ display.info('Setting a %d minute test timeout which will end at: %s' % (args.timeout, deadline), verbosity=1)
+ else:
+ deadline = None
+
+ display.info('Clearing existing test timeout.', verbosity=1)
+
+ if args.explain:
+ return
+
+ if deadline:
+ data = dict(
+ duration=args.timeout,
+ deadline=deadline,
+ )
+
+ write_json_file(TIMEOUT_PATH, data)
+ elif os.path.exists(TIMEOUT_PATH):
+ os.remove(TIMEOUT_PATH)
+
+
+def show_dict(data: dict[str, t.Any], verbose: dict[str, int], root_verbosity: int = 0, path: t.Optional[list[str]] = None) -> None:
+ """Show a dict with varying levels of verbosity."""
+ path = path if path else []
+
+ for key, value in sorted(data.items()):
+ indent = ' ' * len(path)
+ key_path = path + [key]
+ key_name = '.'.join(key_path)
+ verbosity = verbose.get(key_name, root_verbosity)
+
+ if isinstance(value, (tuple, list)):
+ display.info(indent + '%s:' % key, verbosity=verbosity)
+ for item in value:
+ display.info(indent + ' - %s' % item, verbosity=verbosity)
+ elif isinstance(value, dict):
+ min_verbosity = min([verbosity] + [v for k, v in verbose.items() if k.startswith('%s.' % key)])
+ display.info(indent + '%s:' % key, verbosity=min_verbosity)
+ show_dict(value, verbose, verbosity, key_path)
+ else:
+ display.info(indent + '%s: %s' % (key, value), verbosity=verbosity)
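+
+
+# Illustrative reading of the verbose mapping passed by show_dump_env() above:
+# each value is the minimum verbosity at which that key is displayed, so with
+#
+#     verbose = {'docker': 3, 'docker.executable': 0, 'environ': 2}
+#
+# 'docker.executable' always prints, 'environ' needs -vv and the remaining
+# docker details need -vvv. The min() above makes a parent heading print
+# whenever any of its children will.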
+
+
+def get_docker_details(args: EnvConfig) -> dict[str, t.Any]:
+ """Return details about docker."""
+ docker = get_docker_command()
+
+ executable = None
+ info = None
+ version = None
+
+ if docker:
+ executable = docker.executable
+
+ try:
+ docker_info = get_docker_info(args)
+ except ApplicationError as ex:
+ display.warning(str(ex))
+ else:
+ info = docker_info.info
+ version = docker_info.version
+
+ docker_details = dict(
+ executable=executable,
+ info=info,
+ version=version,
+ )
+
+ return docker_details
diff --git a/test/lib/ansible_test/_internal/commands/integration/__init__.py b/test/lib/ansible_test/_internal/commands/integration/__init__.py
new file mode 100644
index 0000000..8864d2e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/__init__.py
@@ -0,0 +1,967 @@
+"""Ansible integration test infrastructure."""
+from __future__ import annotations
+
+import collections.abc as c
+import contextlib
+import datetime
+import json
+import os
+import re
+import shutil
+import tempfile
+import time
+import typing as t
+
+from ...encoding import (
+ to_bytes,
+)
+
+from ...ansible_util import (
+ ansible_environment,
+)
+
+from ...executor import (
+ get_changes_filter,
+ AllTargetsSkipped,
+ Delegate,
+ ListTargets,
+)
+
+from ...python_requirements import (
+ install_requirements,
+)
+
+from ...ci import (
+ get_ci_provider,
+)
+
+from ...target import (
+ analyze_integration_target_dependencies,
+ walk_integration_targets,
+ IntegrationTarget,
+ walk_internal_targets,
+ TIntegrationTarget,
+ IntegrationTargetType,
+)
+
+from ...config import (
+ IntegrationConfig,
+ NetworkIntegrationConfig,
+ PosixIntegrationConfig,
+ WindowsIntegrationConfig,
+ TIntegrationConfig,
+)
+
+from ...io import (
+ make_dirs,
+ read_text_file,
+)
+
+from ...util import (
+ ApplicationError,
+ display,
+ SubprocessError,
+ remove_tree,
+)
+
+from ...util_common import (
+ named_temporary_file,
+ ResultType,
+ run_command,
+ write_json_test_results,
+ check_pyyaml,
+)
+
+from ...coverage_util import (
+ cover_python,
+)
+
+from ...cache import (
+ CommonCache,
+)
+
+from .cloud import (
+ CloudEnvironmentConfig,
+ cloud_filter,
+ cloud_init,
+ get_cloud_environment,
+ get_cloud_platforms,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...host_configs import (
+ InventoryConfig,
+ OriginConfig,
+)
+
+from ...host_profiles import (
+ ControllerProfile,
+ ControllerHostProfile,
+ HostProfile,
+ PosixProfile,
+ SshTargetHostProfile,
+)
+
+from ...provisioning import (
+ HostState,
+ prepare_profiles,
+)
+
+from ...pypi_proxy import (
+ configure_pypi_proxy,
+)
+
+from ...inventory import (
+ create_controller_inventory,
+ create_windows_inventory,
+ create_network_inventory,
+ create_posix_inventory,
+)
+
+from .filters import (
+ get_target_filter,
+)
+
+from .coverage import (
+ CoverageManager,
+)
+
+THostProfile = t.TypeVar('THostProfile', bound=HostProfile)
+
+
+def generate_dependency_map(integration_targets: list[IntegrationTarget]) -> dict[str, set[IntegrationTarget]]:
+ """Analyze the given list of integration test targets and return a dictionary expressing target names and the targets on which they depend."""
+ targets_dict = dict((target.name, target) for target in integration_targets)
+ target_dependencies = analyze_integration_target_dependencies(integration_targets)
+ dependency_map: dict[str, set[IntegrationTarget]] = {}
+
+ invalid_targets = set()
+
+ for dependency, dependents in target_dependencies.items():
+ dependency_target = targets_dict.get(dependency)
+
+ if not dependency_target:
+ invalid_targets.add(dependency)
+ continue
+
+ for dependent in dependents:
+ if dependent not in dependency_map:
+ dependency_map[dependent] = set()
+
+ dependency_map[dependent].add(dependency_target)
+
+ if invalid_targets:
+ raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets)))
+
+ return dependency_map
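+
+
+# Minimal illustration (hypothetical target names): if
+# analyze_integration_target_dependencies() reports
+#
+#     {'setup_foo': {'test_a', 'test_b'}}
+#
+# the returned map is {'test_a': {<IntegrationTarget setup_foo>},
+# 'test_b': {<IntegrationTarget setup_foo>}}, i.e. the dependency -> dependents
+# mapping is inverted into dependent -> dependency targets.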
+
+
+def get_files_needed(target_dependencies: list[IntegrationTarget]) -> list[str]:
+ """Return a list of files needed by the given list of target dependencies."""
+ files_needed: list[str] = []
+
+ for target_dependency in target_dependencies:
+ files_needed += target_dependency.needs_file
+
+ files_needed = sorted(set(files_needed))
+
+ invalid_paths = [path for path in files_needed if not os.path.isfile(path)]
+
+ if invalid_paths:
+ raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths))
+
+ return files_needed
+
+
+def check_inventory(args: IntegrationConfig, inventory_path: str) -> None:
+ """Check the given inventory for issues."""
+ if not isinstance(args.controller, OriginConfig):
+ if os.path.exists(inventory_path):
+ inventory = read_text_file(inventory_path)
+
+ if 'ansible_ssh_private_key_file' in inventory:
+ display.warning('Use of "ansible_ssh_private_key_file" in inventory with the --docker or --remote option is unsupported and will likely fail.')
+
+
+def get_inventory_absolute_path(args: IntegrationConfig, target: InventoryConfig) -> str:
+ """Return the absolute inventory path used for the given integration configuration or target inventory config (if provided)."""
+ path = target.path or os.path.basename(get_inventory_relative_path(args))
+
+ if args.host_path:
+ path = os.path.join(data_context().content.root, path) # post-delegation, path is relative to the content root
+ else:
+ path = os.path.join(data_context().content.root, data_context().content.integration_path, path)
+
+ return path
+
+
+def get_inventory_relative_path(args: IntegrationConfig) -> str:
+ """Return the inventory path used for the given integration configuration relative to the content root."""
+ inventory_names: dict[t.Type[IntegrationConfig], str] = {
+ PosixIntegrationConfig: 'inventory',
+ WindowsIntegrationConfig: 'inventory.winrm',
+ NetworkIntegrationConfig: 'inventory.networking',
+ }
+
+ return os.path.join(data_context().content.integration_path, inventory_names[type(args)])
+
+
+def delegate_inventory(args: IntegrationConfig, inventory_path_src: str) -> None:
+ """Make the given inventory available during delegation."""
+ if isinstance(args, PosixIntegrationConfig):
+ return
+
+ def inventory_callback(files: list[tuple[str, str]]) -> None:
+ """
+ Add the inventory file to the payload file list.
+ This will preserve the file during delegation even if it is ignored or is outside the content and install roots.
+ """
+ inventory_path = get_inventory_relative_path(args)
+ inventory_tuple = inventory_path_src, inventory_path
+
+ if os.path.isfile(inventory_path_src) and inventory_tuple not in files:
+ originals = [item for item in files if item[1] == inventory_path]
+
+ if originals:
+ for original in originals:
+ files.remove(original)
+
+ display.warning('Overriding inventory file "%s" with "%s".' % (inventory_path, inventory_path_src))
+ else:
+ display.notice('Sourcing inventory file "%s" from "%s".' % (inventory_path, inventory_path_src))
+
+ files.append(inventory_tuple)
+
+ data_context().register_payload_callback(inventory_callback)
+
+
+@contextlib.contextmanager
+def integration_test_environment(
+ args: IntegrationConfig,
+ target: IntegrationTarget,
+ inventory_path_src: str,
+) -> c.Iterator[IntegrationEnvironment]:
+ """Context manager that prepares the integration test environment and cleans it up."""
+ ansible_config_src = args.get_ansible_config()
+ ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command)
+
+ if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
+ display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
+
+ integration_dir = os.path.join(data_context().content.root, data_context().content.integration_path)
+ targets_dir = os.path.join(data_context().content.root, data_context().content.integration_targets_path)
+ inventory_path = inventory_path_src
+ ansible_config = ansible_config_src
+ vars_file = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+
+ yield IntegrationEnvironment(data_context().content.root, integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
+ return
+
+ # When testing a collection, the temporary directory must reside within the collection.
+ # This is necessary to enable support for the default collection for non-collection content (playbooks and roles).
+ root_temp_dir = os.path.join(ResultType.TMP.path, 'integration')
+
+ prefix = '%s-' % target.name
+ suffix = '-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'
+
+ if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
+ display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
+ suffix = '-ansible'
+
+ if args.explain:
+ temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
+ else:
+ make_dirs(root_temp_dir)
+ temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
+
+ try:
+ display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ inventory_path = os.path.join(temp_dir, inventory_relative_path)
+
+ cache = IntegrationCache(args)
+
+ target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))
+
+ files_needed = get_files_needed(target_dependencies)
+
+ integration_dir = os.path.join(temp_dir, data_context().content.integration_path)
+ targets_dir = os.path.join(temp_dir, data_context().content.integration_targets_path)
+ ansible_config = os.path.join(temp_dir, ansible_config_relative)
+
+ vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+ vars_file = os.path.join(temp_dir, data_context().content.integration_vars_path)
+
+ file_copies = [
+ (ansible_config_src, ansible_config),
+ (inventory_path_src, inventory_path),
+ ]
+
+ if os.path.exists(vars_file_src):
+ file_copies.append((vars_file_src, vars_file))
+
+ file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed]
+
+ integration_targets_relative_path = data_context().content.integration_targets_path
+
+ directory_copies = [
+ (
+ os.path.join(integration_targets_relative_path, target.relative_path),
+ os.path.join(temp_dir, integration_targets_relative_path, target.relative_path)
+ )
+ for target in target_dependencies
+ ]
+
+ directory_copies = sorted(set(directory_copies))
+ file_copies = sorted(set(file_copies))
+
+ if not args.explain:
+ make_dirs(integration_dir)
+
+ for dir_src, dir_dst in directory_copies:
+ display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2)
+
+ if not args.explain:
+ shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True) # type: ignore[arg-type] # incorrect type stub omits bytes path support
+
+ for file_src, file_dst in file_copies:
+ display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2)
+
+ if not args.explain:
+ make_dirs(os.path.dirname(file_dst))
+ shutil.copy2(file_src, file_dst)
+
+ yield IntegrationEnvironment(temp_dir, integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
+ finally:
+ if not args.explain:
+ remove_tree(temp_dir)
+
+
+@contextlib.contextmanager
+def integration_test_config_file(
+ args: IntegrationConfig,
+ env_config: CloudEnvironmentConfig,
+ integration_dir: str,
+) -> c.Iterator[t.Optional[str]]:
+ """Context manager that provides a config file for integration tests, if needed."""
+ if not env_config:
+ yield None
+ return
+
+ config_vars = (env_config.ansible_vars or {}).copy()
+
+ config_vars.update(dict(
+ ansible_test=dict(
+ environment=env_config.env_vars,
+ module_defaults=env_config.module_defaults,
+ )
+ ))
+
+ config_file = json.dumps(config_vars, indent=4, sort_keys=True)
+
+ with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path: # type: str
+ filename = os.path.relpath(path, integration_dir)
+
+ display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3)
+
+ yield path
+
+
+def create_inventory(
+ args: IntegrationConfig,
+ host_state: HostState,
+ inventory_path: str,
+ target: IntegrationTarget,
+) -> None:
+ """Create inventory."""
+ if isinstance(args, PosixIntegrationConfig):
+ if target.target_type == IntegrationTargetType.CONTROLLER:
+ display.info('Configuring controller inventory.', verbosity=1)
+ create_controller_inventory(args, inventory_path, host_state.controller_profile)
+ elif target.target_type == IntegrationTargetType.TARGET:
+ display.info('Configuring target inventory.', verbosity=1)
+ create_posix_inventory(args, inventory_path, host_state.target_profiles, 'needs/ssh/' in target.aliases)
+ else:
+ raise Exception(f'Unhandled test type for target "{target.name}": {target.target_type.name.lower()}')
+ elif isinstance(args, WindowsIntegrationConfig):
+ display.info('Configuring target inventory.', verbosity=1)
+ target_profiles = filter_profiles_for_target(args, host_state.target_profiles, target)
+ create_windows_inventory(args, inventory_path, target_profiles)
+ elif isinstance(args, NetworkIntegrationConfig):
+ display.info('Configuring target inventory.', verbosity=1)
+ target_profiles = filter_profiles_for_target(args, host_state.target_profiles, target)
+ create_network_inventory(args, inventory_path, target_profiles)
+
+
+def command_integration_filtered(
+ args: IntegrationConfig,
+ host_state: HostState,
+ targets: tuple[IntegrationTarget, ...],
+ all_targets: tuple[IntegrationTarget, ...],
+ inventory_path: str,
+ pre_target: t.Optional[c.Callable[[IntegrationTarget], None]] = None,
+ post_target: t.Optional[c.Callable[[IntegrationTarget], None]] = None,
+):
+ """Run integration tests for the specified targets."""
+ found = False
+ passed = []
+ failed = []
+
+ targets_iter = iter(targets)
+ all_targets_dict = dict((target.name, target) for target in all_targets)
+
+ setup_errors = []
+ setup_targets_executed: set[str] = set()
+
+ for target in all_targets:
+ for setup_target in target.setup_once + target.setup_always:
+ if setup_target not in all_targets_dict:
+ setup_errors.append('Target "%s" contains invalid setup target: %s' % (target.name, setup_target))
+
+ if setup_errors:
+ raise ApplicationError('Found %d invalid setup aliases:\n%s' % (len(setup_errors), '\n'.join(setup_errors)))
+
+ check_pyyaml(host_state.controller_profile.python)
+
+ test_dir = os.path.join(ResultType.TMP.path, 'output_dir')
+
+ if not args.explain and any('needs/ssh/' in target.aliases for target in targets):
+ max_tries = 20
+ display.info('SSH connection to controller required by tests. Checking the connection.')
+ for i in range(1, max_tries + 1):
+ try:
+ run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
+ display.info('SSH service responded.')
+ break
+ except SubprocessError:
+ if i == max_tries:
+ raise
+ seconds = 3
+ display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
+ time.sleep(seconds)
+
+ start_at_task = args.start_at_task
+
+ results = {}
+
+ target_profile = host_state.target_profiles[0]
+
+ if isinstance(target_profile, PosixProfile):
+ target_python = target_profile.python
+
+ if isinstance(target_profile, ControllerProfile):
+ if host_state.controller_profile.python.path != target_profile.python.path:
+ install_requirements(args, target_python, command=True, controller=False) # integration
+ elif isinstance(target_profile, SshTargetHostProfile):
+ connection = target_profile.get_controller_target_connections()[0]
+ install_requirements(args, target_python, command=True, controller=False, connection=connection) # integration
+
+ coverage_manager = CoverageManager(args, host_state, inventory_path)
+ coverage_manager.setup()
+
+ try:
+ for target in targets_iter:
+ if args.start_at and not found:
+ found = target.name == args.start_at
+
+ if not found:
+ continue
+
+ create_inventory(args, host_state, inventory_path, target)
+
+ tries = 2 if args.retry_on_error else 1
+ verbosity = args.verbosity
+
+ cloud_environment = get_cloud_environment(args, target)
+
+ try:
+ while tries:
+ tries -= 1
+
+ try:
+ if cloud_environment:
+ cloud_environment.setup_once()
+
+ run_setup_targets(args, host_state, test_dir, target.setup_once, all_targets_dict, setup_targets_executed, inventory_path,
+ coverage_manager, False)
+
+ start_time = time.time()
+
+ if pre_target:
+ pre_target(target)
+
+ run_setup_targets(args, host_state, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, inventory_path,
+ coverage_manager, True)
+
+ if not args.explain:
+ # create a fresh test directory for each test target
+ remove_tree(test_dir)
+ make_dirs(test_dir)
+
+ try:
+ if target.script_path:
+ command_integration_script(args, host_state, target, test_dir, inventory_path, coverage_manager)
+ else:
+ command_integration_role(args, host_state, target, start_at_task, test_dir, inventory_path, coverage_manager)
+ start_at_task = None
+ finally:
+ if post_target:
+ post_target(target)
+
+ end_time = time.time()
+
+ results[target.name] = dict(
+ name=target.name,
+ type=target.type,
+ aliases=target.aliases,
+ modules=target.modules,
+ run_time_seconds=int(end_time - start_time),
+ setup_once=target.setup_once,
+ setup_always=target.setup_always,
+ )
+
+ break
+ except SubprocessError:
+ if cloud_environment:
+ cloud_environment.on_failure(target, tries)
+
+ if not tries:
+ raise
+
+ if target.retry_never:
+ display.warning(f'Skipping retry of test target "{target.name}" since it has been excluded from retries.')
+ raise
+
+ display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
+ display.verbosity = args.verbosity = 6
+
+ passed.append(target)
+ except Exception as ex:
+ failed.append(target)
+
+ if args.continue_on_error:
+ display.error(str(ex))
+ continue
+
+ display.notice('To resume at this test target, use the option: --start-at %s' % target.name)
+
+ next_target = next(targets_iter, None)
+
+ if next_target:
+ display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)
+
+ raise
+ finally:
+ display.verbosity = args.verbosity = verbosity
+
+ finally:
+ if not args.explain:
+ coverage_manager.teardown()
+
+ result_name = '%s-%s.json' % (
+ args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))
+
+ data = dict(
+ targets=results,
+ )
+
+ write_json_test_results(ResultType.DATA, result_name, data)
+
+ if failed:
+ raise ApplicationError('The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s' % (
+ len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
+
+
+def command_integration_script(
+ args: IntegrationConfig,
+ host_state: HostState,
+ target: IntegrationTarget,
+ test_dir: str,
+ inventory_path: str,
+ coverage_manager: CoverageManager,
+):
+ """Run an integration test script."""
+ display.info('Running %s integration test script' % target.name)
+
+ env_config = None
+
+ if isinstance(args, PosixIntegrationConfig):
+ cloud_environment = get_cloud_environment(args, target)
+
+ if cloud_environment:
+ env_config = cloud_environment.get_environment_config()
+
+ if env_config:
+ display.info('>>> Environment Config\n%s' % json.dumps(dict(
+ env_vars=env_config.env_vars,
+ ansible_vars=env_config.ansible_vars,
+ callback_plugins=env_config.callback_plugins,
+ module_defaults=env_config.module_defaults,
+ ), indent=4, sort_keys=True), verbosity=3)
+
+ with integration_test_environment(args, target, inventory_path) as test_env: # type: IntegrationEnvironment
+ cmd = ['./%s' % os.path.basename(target.script_path)]
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config, test_env)
+ cwd = os.path.join(test_env.targets_dir, target.relative_path)
+
+ env.update(dict(
+ # support use of adhoc ansible commands in collections without specifying the fully qualified collection name
+ ANSIBLE_PLAYBOOK_DIR=cwd,
+ ))
+
+ if env_config and env_config.env_vars:
+ env.update(env_config.env_vars)
+
+ with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path: # type: t.Optional[str]
+ if config_path:
+ cmd += ['-e', '@%s' % config_path]
+
+ env.update(coverage_manager.get_environment(target.name, target.aliases))
+ cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd, capture=False)
+
+
+def command_integration_role(
+ args: IntegrationConfig,
+ host_state: HostState,
+ target: IntegrationTarget,
+ start_at_task: t.Optional[str],
+ test_dir: str,
+ inventory_path: str,
+ coverage_manager: CoverageManager,
+):
+ """Run an integration test role."""
+ display.info('Running %s integration test role' % target.name)
+
+ env_config = None
+
+ vars_files = []
+ variables = dict(
+ output_dir=test_dir,
+ )
+
+ if isinstance(args, WindowsIntegrationConfig):
+ hosts = 'windows'
+ gather_facts = False
+ variables.update(dict(
+ win_output_dir=r'C:\ansible_testing',
+ ))
+ elif isinstance(args, NetworkIntegrationConfig):
+ hosts = target.network_platform
+ gather_facts = False
+ else:
+ hosts = 'testhost'
+ gather_facts = True
+
+ if 'gather_facts/yes/' in target.aliases:
+ gather_facts = True
+ elif 'gather_facts/no/' in target.aliases:
+ gather_facts = False
+
+ if not isinstance(args, NetworkIntegrationConfig):
+ cloud_environment = get_cloud_environment(args, target)
+
+ if cloud_environment:
+ env_config = cloud_environment.get_environment_config()
+
+ if env_config:
+ display.info('>>> Environment Config\n%s' % json.dumps(dict(
+ env_vars=env_config.env_vars,
+ ansible_vars=env_config.ansible_vars,
+ callback_plugins=env_config.callback_plugins,
+ module_defaults=env_config.module_defaults,
+ ), indent=4, sort_keys=True), verbosity=3)
+
+ with integration_test_environment(args, target, inventory_path) as test_env: # type: IntegrationEnvironment
+ if os.path.exists(test_env.vars_file):
+ vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir))
+
+ play = dict(
+ hosts=hosts,
+ gather_facts=gather_facts,
+ vars_files=vars_files,
+ vars=variables,
+ roles=[
+ target.name,
+ ],
+ )
+
+ if env_config:
+ if env_config.ansible_vars:
+ variables.update(env_config.ansible_vars)
+
+ play.update(dict(
+ environment=env_config.env_vars,
+ module_defaults=env_config.module_defaults,
+ ))
+
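+        # JSON is a subset of YAML, so the playbook can be serialized as JSON and still parsed by ansible-playbook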
+ playbook = json.dumps([play], indent=4, sort_keys=True)
+
+ with named_temporary_file(args=args, directory=test_env.integration_dir, prefix='%s-' % target.name, suffix='.yml', content=playbook) as playbook_path:
+ filename = os.path.basename(playbook_path)
+
+ display.info('>>> Playbook: %s\n%s' % (filename, playbook.strip()), verbosity=3)
+
+ cmd = ['ansible-playbook', filename, '-i', os.path.relpath(test_env.inventory_path, test_env.integration_dir)]
+
+ if start_at_task:
+ cmd += ['--start-at-task', start_at_task]
+
+ if args.tags:
+ cmd += ['--tags', args.tags]
+
+ if args.skip_tags:
+ cmd += ['--skip-tags', args.skip_tags]
+
+ if args.diff:
+ cmd += ['--diff']
+
+ if isinstance(args, NetworkIntegrationConfig):
+ if args.testcase:
+ cmd += ['-e', 'testcase=%s' % args.testcase]
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config, test_env)
+ cwd = test_env.integration_dir
+
+ env.update(dict(
+                # support use of ad hoc ansible commands in collections without specifying the fully qualified collection name
+ ANSIBLE_PLAYBOOK_DIR=cwd,
+ ))
+
+ if env_config and env_config.env_vars:
+ env.update(env_config.env_vars)
+
+ env['ANSIBLE_ROLES_PATH'] = test_env.targets_dir
+
+ env.update(coverage_manager.get_environment(target.name, target.aliases))
+ cover_python(args, host_state.controller_profile.python, cmd, target.name, env, cwd=cwd, capture=False)
+
+
+def run_setup_targets(
+ args: IntegrationConfig,
+ host_state: HostState,
+ test_dir: str,
+ target_names: c.Sequence[str],
+ targets_dict: dict[str, IntegrationTarget],
+ targets_executed: set[str],
+ inventory_path: str,
+ coverage_manager: CoverageManager,
+ always: bool,
+):
+    """Run the given setup targets, skipping any that were already executed unless always is set."""
+ for target_name in target_names:
+ if not always and target_name in targets_executed:
+ continue
+
+ target = targets_dict[target_name]
+
+ if not args.explain:
+ # create a fresh test directory for each test target
+ remove_tree(test_dir)
+ make_dirs(test_dir)
+
+ if target.script_path:
+ command_integration_script(args, host_state, target, test_dir, inventory_path, coverage_manager)
+ else:
+ command_integration_role(args, host_state, target, None, test_dir, inventory_path, coverage_manager)
+
+ targets_executed.add(target_name)
+
+
+def integration_environment(
+ args: IntegrationConfig,
+ target: IntegrationTarget,
+ test_dir: str,
+ inventory_path: str,
+ ansible_config: t.Optional[str],
+ env_config: t.Optional[CloudEnvironmentConfig],
+ test_env: IntegrationEnvironment,
+) -> dict[str, str]:
+ """Return a dictionary of environment variables to use when running the given integration test target."""
+ env = ansible_environment(args, ansible_config=ansible_config)
+
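+    # always enable the junit callback, plus any callback plugins requested by the cloud environment config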
+ callback_plugins = ['junit'] + (env_config.callback_plugins or [] if env_config else [])
+
+ integration = dict(
+ JUNIT_OUTPUT_DIR=ResultType.JUNIT.path,
+ JUNIT_TASK_RELATIVE_PATH=test_env.test_dir,
+ JUNIT_REPLACE_OUT_OF_TREE_PATH='out-of-tree:',
+ ANSIBLE_CALLBACKS_ENABLED=','.join(sorted(set(callback_plugins))),
+ ANSIBLE_TEST_CI=args.metadata.ci_provider or get_ci_provider().code,
+ ANSIBLE_TEST_COVERAGE='check' if args.coverage_check else ('yes' if args.coverage else ''),
+ OUTPUT_DIR=test_dir,
+ INVENTORY_PATH=os.path.abspath(inventory_path),
+ )
+
+ if args.debug_strategy:
+ env.update(dict(ANSIBLE_STRATEGY='debug'))
+
+ if 'non_local/' in target.aliases:
+ if args.coverage:
+ display.warning('Skipping coverage reporting on Ansible modules for non-local test: %s' % target.name)
+
+ env.update(dict(ANSIBLE_TEST_REMOTE_INTERPRETER=''))
+
+ env.update(integration)
+
+ return env
+
+
+class IntegrationEnvironment:
+ """Details about the integration environment."""
+ def __init__(self, test_dir: str, integration_dir: str, targets_dir: str, inventory_path: str, ansible_config: str, vars_file: str) -> None:
+ self.test_dir = test_dir
+ self.integration_dir = integration_dir
+ self.targets_dir = targets_dir
+ self.inventory_path = inventory_path
+ self.ansible_config = ansible_config
+ self.vars_file = vars_file
+
+
+class IntegrationCache(CommonCache):
+ """Integration cache."""
+ @property
+ def integration_targets(self) -> list[IntegrationTarget]:
+ """The list of integration test targets."""
+ return self.get('integration_targets', lambda: list(walk_integration_targets()))
+
+ @property
+ def dependency_map(self) -> dict[str, set[IntegrationTarget]]:
+ """The dependency map of integration test targets."""
+ return self.get('dependency_map', lambda: generate_dependency_map(self.integration_targets))
+
+
+def filter_profiles_for_target(args: IntegrationConfig, profiles: list[THostProfile], target: IntegrationTarget) -> list[THostProfile]:
+ """Return a list of profiles after applying target filters."""
+ if target.target_type == IntegrationTargetType.CONTROLLER:
+ profile_filter = get_target_filter(args, [args.controller], True)
+ elif target.target_type == IntegrationTargetType.TARGET:
+ profile_filter = get_target_filter(args, args.targets, False)
+ else:
+ raise Exception(f'Unhandled test type for target "{target.name}": {target.target_type.name.lower()}')
+
+ profiles = profile_filter.filter_profiles(profiles, target)
+
+ return profiles
+
+
+def get_integration_filter(args: IntegrationConfig, targets: list[IntegrationTarget]) -> set[str]:
+    """Return the set of test target names to skip based on the host(s) that will be used to run the specified test targets."""
+ invalid_targets = sorted(target.name for target in targets if target.target_type not in (IntegrationTargetType.CONTROLLER, IntegrationTargetType.TARGET))
+
+ if invalid_targets and not args.list_targets:
+ message = f'''Unable to determine context for the following test targets: {", ".join(invalid_targets)}
+
+Make sure the test targets are correctly named:
+
+ - Modules - The target name should match the module name.
+ - Plugins - The target name should be "{{plugin_type}}_{{plugin_name}}".
+
+If necessary, context can be controlled by adding entries to the "aliases" file for a test target:
+
+ - Add the name(s) of modules which are tested.
+ - Add "context/target" for module and module_utils tests (these will run on the target host).
+ - Add "context/controller" for other test types (these will run on the controller).'''
+
+ raise ApplicationError(message)
+
+ invalid_targets = sorted(target.name for target in targets if target.actual_type not in (IntegrationTargetType.CONTROLLER, IntegrationTargetType.TARGET))
+
+ if invalid_targets:
+ if data_context().content.is_ansible:
+ display.warning(f'Unable to determine context for the following test targets: {", ".join(invalid_targets)}')
+ else:
+ display.warning(f'Unable to determine context for the following test targets, they will be run on the target host: {", ".join(invalid_targets)}')
+
+ exclude: set[str] = set()
+
+ controller_targets = [target for target in targets if target.target_type == IntegrationTargetType.CONTROLLER]
+ target_targets = [target for target in targets if target.target_type == IntegrationTargetType.TARGET]
+
+ controller_filter = get_target_filter(args, [args.controller], True)
+ target_filter = get_target_filter(args, args.targets, False)
+
+ controller_filter.filter_targets(controller_targets, exclude)
+ target_filter.filter_targets(target_targets, exclude)
+
+ return exclude
+
+
+def command_integration_filter(args: TIntegrationConfig,
+ targets: c.Iterable[TIntegrationTarget],
+ ) -> tuple[HostState, tuple[TIntegrationTarget, ...]]:
+ """Filter the given integration test targets."""
+ targets = tuple(target for target in targets if 'hidden/' not in target.aliases)
+ changes = get_changes_filter(args)
+
+ # special behavior when the --changed-all-target target is selected based on changes
+ if args.changed_all_target in changes:
+ # act as though the --changed-all-target target was in the include list
+ if args.changed_all_mode == 'include' and args.changed_all_target not in args.include:
+ args.include.append(args.changed_all_target)
+ args.delegate_args += ['--include', args.changed_all_target]
+ # act as though the --changed-all-target target was in the exclude list
+ elif args.changed_all_mode == 'exclude' and args.changed_all_target not in args.exclude:
+ args.exclude.append(args.changed_all_target)
+
+ require = args.require + changes
+ exclude = args.exclude
+
+ internal_targets = walk_internal_targets(targets, args.include, exclude, require)
+ environment_exclude = get_integration_filter(args, list(internal_targets))
+
+ environment_exclude |= set(cloud_filter(args, internal_targets))
+
+ if environment_exclude:
+ exclude = sorted(set(exclude) | environment_exclude)
+ internal_targets = walk_internal_targets(targets, args.include, exclude, require)
+
+ if not internal_targets:
+ raise AllTargetsSkipped()
+
+ if args.start_at and not any(target.name == args.start_at for target in internal_targets):
+ raise ApplicationError('Start at target matches nothing: %s' % args.start_at)
+
+ cloud_init(args, internal_targets)
+
+ vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+
+ if os.path.exists(vars_file_src):
+ def integration_config_callback(files: list[tuple[str, str]]) -> None:
+ """
+ Add the integration config vars file to the payload file list.
+ This will preserve the file during delegation even if the file is ignored by source control.
+ """
+ files.append((vars_file_src, data_context().content.integration_vars_path))
+
+ data_context().register_payload_callback(integration_config_callback)
+
+ if args.list_targets:
+ raise ListTargets([target.name for target in internal_targets])
+
+ # requirements are installed using a callback since the windows-integration and network-integration host status checks depend on them
+ host_state = prepare_profiles(args, targets_use_pypi=True, requirements=requirements) # integration, windows-integration, network-integration
+
+ if args.delegate:
+ raise Delegate(host_state=host_state, require=require, exclude=exclude)
+
+ return host_state, internal_targets
+
+
+def requirements(host_profile: HostProfile) -> None:
+ """Install requirements after bootstrapping and delegation."""
+ if isinstance(host_profile, ControllerHostProfile) and host_profile.controller:
+ configure_pypi_proxy(host_profile.args, host_profile) # integration, windows-integration, network-integration
+ install_requirements(host_profile.args, host_profile.python, ansible=True, command=True) # integration, windows-integration, network-integration
+ elif isinstance(host_profile, PosixProfile) and not isinstance(host_profile, ControllerProfile):
+ configure_pypi_proxy(host_profile.args, host_profile) # integration
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py b/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
new file mode 100644
index 0000000..0c078b9
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
@@ -0,0 +1,389 @@
+"""Plugin system for cloud providers and environments for use in integration tests."""
+from __future__ import annotations
+
+import abc
+import atexit
+import datetime
+import os
+import re
+import tempfile
+import time
+import typing as t
+
+from ....encoding import (
+ to_bytes,
+)
+
+from ....io import (
+ read_text_file,
+)
+
+from ....util import (
+ ANSIBLE_TEST_CONFIG_ROOT,
+ ApplicationError,
+ display,
+ import_plugins,
+ load_plugins,
+ cache,
+)
+
+from ....util_common import (
+ ResultType,
+ write_json_test_results,
+)
+
+from ....target import (
+ IntegrationTarget,
+)
+
+from ....config import (
+ IntegrationConfig,
+ TestConfig,
+)
+
+from ....ci import (
+ get_ci_provider,
+)
+
+from ....data import (
+ data_context,
+)
+
+from ....docker_util import (
+ docker_available,
+)
+
+
+@cache
+def get_cloud_plugins() -> tuple[dict[str, t.Type[CloudProvider]], dict[str, t.Type[CloudEnvironment]]]:
+ """Import cloud plugins and load them into the plugin dictionaries."""
+ import_plugins('commands/integration/cloud')
+
+ providers: dict[str, t.Type[CloudProvider]] = {}
+ environments: dict[str, t.Type[CloudEnvironment]] = {}
+
+ load_plugins(CloudProvider, providers)
+ load_plugins(CloudEnvironment, environments)
+
+ return providers, environments
+
+
+@cache
+def get_provider_plugins() -> dict[str, t.Type[CloudProvider]]:
+ """Return a dictionary of the available cloud provider plugins."""
+ return get_cloud_plugins()[0]
+
+
+@cache
+def get_environment_plugins() -> dict[str, t.Type[CloudEnvironment]]:
+ """Return a dictionary of the available cloud environment plugins."""
+ return get_cloud_plugins()[1]
+
+
+def get_cloud_platforms(args: TestConfig, targets: t.Optional[tuple[IntegrationTarget, ...]] = None) -> list[str]:
+ """Return cloud platform names for the specified targets."""
+ if isinstance(args, IntegrationConfig):
+ if args.list_targets:
+ return []
+
+ if targets is None:
+ cloud_platforms = set(args.metadata.cloud_config or [])
+ else:
+ cloud_platforms = set(get_cloud_platform(target) for target in targets)
+
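+    # targets without a cloud platform produce None; drop it from the set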
+ cloud_platforms.discard(None)
+
+ return sorted(cloud_platforms)
+
+
+def get_cloud_platform(target: IntegrationTarget) -> t.Optional[str]:
+ """Return the name of the cloud platform used for the given target, or None if no cloud platform is used."""
+ cloud_platforms = set(a.split('/')[1] for a in target.aliases if a.startswith('cloud/') and a.endswith('/') and a != 'cloud/')
+
+ if not cloud_platforms:
+ return None
+
+ if len(cloud_platforms) == 1:
+ cloud_platform = cloud_platforms.pop()
+
+ if cloud_platform not in get_provider_plugins():
+ raise ApplicationError('Target %s aliases contains unknown cloud platform: %s' % (target.name, cloud_platform))
+
+ return cloud_platform
+
+ raise ApplicationError('Target %s aliases contains multiple cloud platforms: %s' % (target.name, ', '.join(sorted(cloud_platforms))))
+
+
+def get_cloud_providers(args: IntegrationConfig, targets: t.Optional[tuple[IntegrationTarget, ...]] = None) -> list[CloudProvider]:
+ """Return a list of cloud providers for the given targets."""
+ return [get_provider_plugins()[p](args) for p in get_cloud_platforms(args, targets)]
+
+
+def get_cloud_environment(args: IntegrationConfig, target: IntegrationTarget) -> t.Optional[CloudEnvironment]:
+ """Return the cloud environment for the given target, or None if no cloud environment is used for the target."""
+ cloud_platform = get_cloud_platform(target)
+
+ if not cloud_platform:
+ return None
+
+ return get_environment_plugins()[cloud_platform](args)
+
+
+def cloud_filter(args: IntegrationConfig, targets: tuple[IntegrationTarget, ...]) -> list[str]:
+ """Return a list of target names to exclude based on the given targets."""
+ if args.metadata.cloud_config is not None:
+ return [] # cloud filter already performed prior to delegation
+
+ exclude: list[str] = []
+
+ for provider in get_cloud_providers(args, targets):
+ provider.filter(targets, exclude)
+
+ return exclude
+
+
+def cloud_init(args: IntegrationConfig, targets: tuple[IntegrationTarget, ...]) -> None:
+ """Initialize cloud plugins for the given targets."""
+ if args.metadata.cloud_config is not None:
+ return # cloud configuration already established prior to delegation
+
+ args.metadata.cloud_config = {}
+
+ results = {}
+
+ for provider in get_cloud_providers(args, targets):
+ if args.prime_containers and not provider.uses_docker:
+ continue
+
+ args.metadata.cloud_config[provider.platform] = {}
+
+ start_time = time.time()
+ provider.setup()
+ end_time = time.time()
+
+ results[provider.platform] = dict(
+ platform=provider.platform,
+ setup_seconds=int(end_time - start_time),
+ targets=[target.name for target in targets],
+ )
+
+ if not args.explain and results:
+ result_name = '%s-%s.json' % (
+ args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))
+
+ data = dict(
+ clouds=results,
+ )
+
+ write_json_test_results(ResultType.DATA, result_name, data)
+
+
+class CloudBase(metaclass=abc.ABCMeta):
+ """Base class for cloud plugins."""
+ _CONFIG_PATH = 'config_path'
+ _RESOURCE_PREFIX = 'resource_prefix'
+ _MANAGED = 'managed'
+ _SETUP_EXECUTED = 'setup_executed'
+
+ def __init__(self, args: IntegrationConfig) -> None:
+ self.args = args
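+        # the platform name is derived from the plugin module's filename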
+ self.platform = self.__module__.rsplit('.', 1)[-1]
+
+ def config_callback(files: list[tuple[str, str]]) -> None:
+ """Add the config file to the payload file list."""
+ if self.platform not in self.args.metadata.cloud_config:
+ return # platform was initialized, but not used -- such as being skipped due to all tests being disabled
+
+ if self._get_cloud_config(self._CONFIG_PATH, ''):
+ pair = (self.config_path, os.path.relpath(self.config_path, data_context().content.root))
+
+ if pair not in files:
+ display.info('Including %s config: %s -> %s' % (self.platform, pair[0], pair[1]), verbosity=3)
+ files.append(pair)
+
+ data_context().register_payload_callback(config_callback)
+
+ @property
+ def setup_executed(self) -> bool:
+ """True if setup has been executed, otherwise False."""
+ return t.cast(bool, self._get_cloud_config(self._SETUP_EXECUTED, False))
+
+ @setup_executed.setter
+ def setup_executed(self, value: bool) -> None:
+ """True if setup has been executed, otherwise False."""
+ self._set_cloud_config(self._SETUP_EXECUTED, value)
+
+ @property
+ def config_path(self) -> str:
+ """Path to the configuration file."""
+ return os.path.join(data_context().content.root, str(self._get_cloud_config(self._CONFIG_PATH)))
+
+ @config_path.setter
+ def config_path(self, value: str) -> None:
+ """Path to the configuration file."""
+ self._set_cloud_config(self._CONFIG_PATH, value)
+
+ @property
+ def resource_prefix(self) -> str:
+ """Resource prefix."""
+ return str(self._get_cloud_config(self._RESOURCE_PREFIX))
+
+ @resource_prefix.setter
+ def resource_prefix(self, value: str) -> None:
+ """Resource prefix."""
+ self._set_cloud_config(self._RESOURCE_PREFIX, value)
+
+ @property
+ def managed(self) -> bool:
+ """True if resources are managed by ansible-test, otherwise False."""
+ return t.cast(bool, self._get_cloud_config(self._MANAGED))
+
+ @managed.setter
+ def managed(self, value: bool) -> None:
+ """True if resources are managed by ansible-test, otherwise False."""
+ self._set_cloud_config(self._MANAGED, value)
+
+ def _get_cloud_config(self, key: str, default: t.Optional[t.Union[str, int, bool]] = None) -> t.Union[str, int, bool]:
+ """Return the specified value from the internal configuration."""
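+        # a non-None default makes a missing key return the default; otherwise a missing key raises KeyError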
+ if default is not None:
+ return self.args.metadata.cloud_config[self.platform].get(key, default)
+
+ return self.args.metadata.cloud_config[self.platform][key]
+
+ def _set_cloud_config(self, key: str, value: t.Union[str, int, bool]) -> None:
+ """Set the specified key and value in the internal configuration."""
+ self.args.metadata.cloud_config[self.platform][key] = value
+
+
+class CloudProvider(CloudBase):
+ """Base class for cloud provider plugins. Sets up cloud resources before delegation."""
+ def __init__(self, args: IntegrationConfig, config_extension: str = '.ini') -> None:
+ super().__init__(args)
+
+ self.ci_provider = get_ci_provider()
+ self.remove_config = False
+ self.config_static_name = 'cloud-config-%s%s' % (self.platform, config_extension)
+ self.config_static_path = os.path.join(data_context().content.integration_path, self.config_static_name)
+ self.config_template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, '%s.template' % self.config_static_name)
+ self.config_extension = config_extension
+
+ self.uses_config = False
+ self.uses_docker = False
+
+ def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
+ """Filter out the cloud tests when the necessary config and resources are not available."""
+ if not self.uses_docker and not self.uses_config:
+ return
+
+ if self.uses_docker and docker_available():
+ return
+
+ if self.uses_config and os.path.exists(self.config_static_path):
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+
+ if not self.uses_docker and self.uses_config:
+ display.warning('Excluding tests marked "%s" which require a "%s" config file (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_static_path, self.config_template_path, ', '.join(skipped)))
+ elif self.uses_docker and not self.uses_config:
+                display.warning('Excluding tests marked "%s" which require container support: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+ elif self.uses_docker and self.uses_config:
+                display.warning('Excluding tests marked "%s" which require container support or a "%s" config file (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_static_path, self.config_template_path, ', '.join(skipped)))
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ self.resource_prefix = self.ci_provider.generate_resource_prefix()
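+        # normalize the prefix to lowercase alphanumerics and dashes, capped at 63 characters (a common resource name limit)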
+ self.resource_prefix = re.sub(r'[^a-zA-Z0-9]+', '-', self.resource_prefix)[:63].lower().rstrip('-')
+
+ atexit.register(self.cleanup)
+
+ def cleanup(self) -> None:
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.remove_config:
+ os.remove(self.config_path)
+
+ def _use_static_config(self) -> bool:
+ """Use a static config file if available. Returns True if static config is used, otherwise returns False."""
+ if os.path.isfile(self.config_static_path):
+ display.info('Using existing %s cloud config: %s' % (self.platform, self.config_static_path), verbosity=1)
+ self.config_path = self.config_static_path
+ static = True
+ else:
+ static = False
+
+ self.managed = not static
+
+ return static
+
+ def _write_config(self, content: str) -> None:
+ """Write the given content to the config file."""
+ prefix = '%s-' % os.path.splitext(os.path.basename(self.config_static_path))[0]
+
+ with tempfile.NamedTemporaryFile(dir=data_context().content.integration_path, prefix=prefix, suffix=self.config_extension, delete=False) as config_fd:
+ filename = os.path.join(data_context().content.integration_path, os.path.basename(config_fd.name))
+
+ self.config_path = filename
+ self.remove_config = True
+
+ display.info('>>> Config: %s\n%s' % (filename, content.strip()), verbosity=3)
+
+ config_fd.write(to_bytes(content))
+ config_fd.flush()
+
+ def _read_config_template(self) -> str:
+ """Read and return the configuration template."""
+ lines = read_text_file(self.config_template_path).splitlines()
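+        # strip full-line comments (lines beginning with '#')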
+ lines = [line for line in lines if not line.startswith('#')]
+ config = '\n'.join(lines).strip() + '\n'
+ return config
+
+ @staticmethod
+ def _populate_config_template(template: str, values: dict[str, str]) -> str:
+ """Populate and return the given template with the provided values."""
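+        # each placeholder in the template has the form '@KEY' and is replaced with its value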
+ for key in sorted(values):
+ value = values[key]
+ template = template.replace('@%s' % key, value)
+
+ return template
+
+
+class CloudEnvironment(CloudBase):
+ """Base class for cloud environment plugins. Updates integration test environment after delegation."""
+ def setup_once(self) -> None:
+ """Run setup if it has not already been run."""
+ if self.setup_executed:
+ return
+
+ self.setup()
+ self.setup_executed = True
+
+ def setup(self) -> None:
+ """Setup which should be done once per environment instead of once per test target."""
+
+ @abc.abstractmethod
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+
+ def on_failure(self, target: IntegrationTarget, tries: int) -> None:
+ """Callback to run when an integration target fails."""
+
+
+class CloudEnvironmentConfig:
+ """Configuration for the environment."""
+ def __init__(self,
+ env_vars: t.Optional[dict[str, str]] = None,
+ ansible_vars: t.Optional[dict[str, t.Any]] = None,
+ module_defaults: t.Optional[dict[str, dict[str, t.Any]]] = None,
+ callback_plugins: t.Optional[list[str]] = None,
+ ):
+ self.env_vars = env_vars
+ self.ansible_vars = ansible_vars
+ self.module_defaults = module_defaults
+ self.callback_plugins = callback_plugins
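+
+
+# Example usage (illustrative only; the names below are hypothetical): a concrete
+# environment plugin typically builds its configuration along these lines:
+#
+#   return CloudEnvironmentConfig(
+#       env_vars=dict(EXAMPLE_TOKEN=token),  # merged into the test process environment
+#       ansible_vars=dict(resource_prefix=self.resource_prefix),  # passed to plays as variables
+#   )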
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/acme.py b/test/lib/ansible_test/_internal/commands/integration/cloud/acme.py
new file mode 100644
index 0000000..007d383
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/acme.py
@@ -0,0 +1,79 @@
+"""ACME plugin for integration tests."""
+from __future__ import annotations
+
+import os
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....containers import (
+ CleanupMode,
+ run_support_container,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class ACMEProvider(CloudProvider):
+ """ACME plugin. Sets up cloud resources for tests."""
+ DOCKER_SIMULATOR_NAME = 'acme-simulator'
+
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ if os.environ.get('ANSIBLE_ACME_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_ACME_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/acme-test-container:2.1.0'
+
+ self.uses_docker = True
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def _setup_dynamic(self) -> None:
+        """Create an ACME test container using docker."""
+ ports = [
+ 5000, # control port for flask app in container
+ 14000, # Pebble ACME CA
+ ]
+
+ run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_SIMULATOR_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=CleanupMode.YES,
+ )
+
+ self._set_cloud_config('acme_host', self.DOCKER_SIMULATOR_NAME)
+
+ def _setup_static(self) -> None:
+ raise NotImplementedError()
+
+
+class ACMEEnvironment(CloudEnvironment):
+ """ACME environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ ansible_vars = dict(
+ acme_host=self._get_cloud_config('acme_host'),
+ )
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py b/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py
new file mode 100644
index 0000000..234f311
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/aws.py
@@ -0,0 +1,131 @@
+"""AWS plugin for integration tests."""
+from __future__ import annotations
+
+import os
+import uuid
+import configparser
+import typing as t
+
+from ....util import (
+ ApplicationError,
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....target import (
+ IntegrationTarget,
+)
+
+from ....core_ci import (
+ AnsibleCoreCI,
+ CloudResource,
+)
+
+from ....host_configs import (
+ OriginConfig,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class AwsCloudProvider(CloudProvider):
+ """AWS cloud provider plugin. Sets up cloud resources before delegation."""
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.uses_config = True
+
+ def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
+ """Filter out the cloud tests when the necessary config and resources are not available."""
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ super().filter(targets, exclude)
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ aws_config_path = os.path.expanduser('~/.aws')
+
+ if os.path.exists(aws_config_path) and isinstance(self.args.controller, OriginConfig):
+ raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self) -> None:
+ """Request AWS credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+
+ aci = self._create_ansible_core_ci()
+
+ response = aci.start()
+
+ if not self.args.explain:
+ credentials = response['aws']['credentials']
+
+ values = dict(
+ ACCESS_KEY=credentials['access_key'],
+ SECRET_KEY=credentials['secret_key'],
+ SECURITY_TOKEN=credentials['session_token'],
+ REGION='us-east-1',
+ )
+
+ display.sensitive.add(values['SECRET_KEY'])
+ display.sensitive.add(values['SECURITY_TOKEN'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self) -> AnsibleCoreCI:
+ """Return an AWS instance of AnsibleCoreCI."""
+ return AnsibleCoreCI(self.args, CloudResource(platform='aws'))
+
+
+class AwsCloudEnvironment(CloudEnvironment):
+ """AWS cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ parser = configparser.ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars: dict[str, t.Any] = dict(
+ resource_prefix=self.resource_prefix,
+ tiny_prefix=uuid.uuid4().hex[0:12]
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ display.sensitive.add(ansible_vars.get('aws_secret_key'))
+ display.sensitive.add(ansible_vars.get('security_token'))
+
+ if 'aws_cleanup' not in ansible_vars:
+ ansible_vars['aws_cleanup'] = not self.managed
+
+ env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ callback_plugins=['aws_resource_actions'],
+ )
+
+ def on_failure(self, target: IntegrationTarget, tries: int) -> None:
+ """Callback to run when an integration target fails."""
+ if not tries and self.managed:
+ display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
+ 'https://docs.ansible.com/ansible/devel/collections/amazon/aws/docsite/dev_guidelines.html#aws-permissions-for-integration-tests'
+ % target.name)
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py b/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py
new file mode 100644
index 0000000..dc5136a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/azure.py
@@ -0,0 +1,166 @@
+"""Azure plugin for integration tests."""
+from __future__ import annotations
+
+import configparser
+import typing as t
+
+from ....util import (
+ ApplicationError,
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....target import (
+ IntegrationTarget,
+)
+
+from ....core_ci import (
+ AnsibleCoreCI,
+ CloudResource,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class AzureCloudProvider(CloudProvider):
+ """Azure cloud provider plugin. Sets up cloud resources before delegation."""
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.aci: t.Optional[AnsibleCoreCI] = None
+
+ self.uses_config = True
+
+ def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
+ """Filter out the cloud tests when the necessary config and resources are not available."""
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ super().filter(targets, exclude)
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ get_config(self.config_path) # check required variables
+
+ def cleanup(self) -> None:
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.aci:
+ self.aci.stop()
+
+ super().cleanup()
+
+ def _setup_dynamic(self) -> None:
+ """Request Azure credentials through ansible-core-ci."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+ response = {}
+
+ aci = self._create_ansible_core_ci()
+
+ aci_result = aci.start()
+
+ if not self.args.explain:
+ response = aci_result['azure']
+ self.aci = aci
+
+ if not self.args.explain:
+ values = dict(
+ AZURE_CLIENT_ID=response['clientId'],
+ AZURE_SECRET=response['clientSecret'],
+ AZURE_SUBSCRIPTION_ID=response['subscriptionId'],
+ AZURE_TENANT=response['tenantId'],
+ RESOURCE_GROUP=response['resourceGroupNames'][0],
+ RESOURCE_GROUP_SECONDARY=response['resourceGroupNames'][1],
+ )
+
+ display.sensitive.add(values['AZURE_SECRET'])
+
+ config = '\n'.join('%s: %s' % (key, values[key]) for key in sorted(values))
+
+ config = '[default]\n' + config
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self) -> AnsibleCoreCI:
+ """Return an Azure instance of AnsibleCoreCI."""
+ return AnsibleCoreCI(self.args, CloudResource(platform='azure'))
+
+
+class AzureCloudEnvironment(CloudEnvironment):
+ """Azure cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ env_vars = get_config(self.config_path)
+
+ display.sensitive.add(env_vars.get('AZURE_SECRET'))
+ display.sensitive.add(env_vars.get('AZURE_PASSWORD'))
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
+
+ def on_failure(self, target: IntegrationTarget, tries: int) -> None:
+ """Callback to run when an integration target fails."""
+ if not tries and self.managed:
+ display.notice('If %s failed due to permissions, the test policy may need to be updated.' % target.name)
+
+
+def get_config(config_path: str) -> dict[str, str]:
+ """Return a configuration dictionary parsed from the given configuration path."""
+ parser = configparser.ConfigParser()
+ parser.read(config_path)
+
+ config = dict((key.upper(), value) for key, value in parser.items('default'))
+
+ rg_vars = (
+ 'RESOURCE_GROUP',
+ 'RESOURCE_GROUP_SECONDARY',
+ )
+
+ sp_vars = (
+ 'AZURE_CLIENT_ID',
+ 'AZURE_SECRET',
+ 'AZURE_SUBSCRIPTION_ID',
+ 'AZURE_TENANT',
+ )
+
+ ad_vars = (
+ 'AZURE_AD_USER',
+ 'AZURE_PASSWORD',
+ 'AZURE_SUBSCRIPTION_ID',
+ )
+
+ rg_ok = all(var in config for var in rg_vars)
+ sp_ok = all(var in config for var in sp_vars)
+ ad_ok = all(var in config for var in ad_vars)
+
+ if not rg_ok:
+ raise ApplicationError('Resource groups must be defined with: %s' % ', '.join(sorted(rg_vars)))
+
+ if not sp_ok and not ad_ok:
+ raise ApplicationError('Credentials must be defined using either:\nService Principal: %s\nActive Directory: %s' % (
+ ', '.join(sorted(sp_vars)), ', '.join(sorted(ad_vars))))
+
+ return config
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/cloudscale.py b/test/lib/ansible_test/_internal/commands/integration/cloud/cloudscale.py
new file mode 100644
index 0000000..f453ef3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/cloudscale.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Cloudscale plugin for integration tests."""
+from __future__ import annotations
+
+import configparser
+
+from ....util import (
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class CloudscaleCloudProvider(CloudProvider):
+ """Cloudscale cloud provider plugin. Sets up cloud resources before delegation."""
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.uses_config = True
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ self._use_static_config()
+
+
+class CloudscaleCloudEnvironment(CloudEnvironment):
+ """Cloudscale cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ parser = configparser.ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ CLOUDSCALE_API_TOKEN=parser.get('default', 'cloudscale_api_token'),
+ )
+
+ display.sensitive.add(env_vars['CLOUDSCALE_API_TOKEN'])
+
+ ansible_vars = dict(
+ cloudscale_resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py b/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py
new file mode 100644
index 0000000..0037b42
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/cs.py
@@ -0,0 +1,174 @@
+"""CloudStack plugin for integration tests."""
+from __future__ import annotations
+
+import json
+import configparser
+import os
+import urllib.parse
+import typing as t
+
+from ....util import (
+ ApplicationError,
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....docker_util import (
+ docker_exec,
+)
+
+from ....containers import (
+ CleanupMode,
+ run_support_container,
+ wait_for_file,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class CsCloudProvider(CloudProvider):
+ """CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
+ DOCKER_SIMULATOR_NAME = 'cloudstack-sim'
+
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.image = os.environ.get('ANSIBLE_CLOUDSTACK_CONTAINER', 'quay.io/ansible/cloudstack-test-container:1.4.0')
+ self.host = ''
+ self.port = 0
+
+ self.uses_docker = True
+ self.uses_config = True
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def _setup_static(self) -> None:
+ """Configure CloudStack tests for use with static configuration."""
+ parser = configparser.ConfigParser()
+ parser.read(self.config_static_path)
+
+ endpoint = parser.get('cloudstack', 'endpoint')
+
+ parts = urllib.parse.urlparse(endpoint)
+
+ self.host = parts.hostname
+
+ if not self.host:
+ raise ApplicationError('Could not determine host from endpoint: %s' % endpoint)
+
+ if parts.port:
+ self.port = parts.port
+ elif parts.scheme == 'http':
+ self.port = 80
+ elif parts.scheme == 'https':
+ self.port = 443
+ else:
+ raise ApplicationError('Could not determine port from endpoint: %s' % endpoint)
+
+ display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)
+
+ def _setup_dynamic(self) -> None:
+ """Create a CloudStack simulator using docker."""
+ config = self._read_config_template()
+
+ self.port = 8888
+
+ ports = [
+ self.port,
+ ]
+
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_SIMULATOR_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=CleanupMode.YES,
+ )
+
+ if not descriptor:
+ return
+
+ # apply work-around for OverlayFS issue
+ # https://github.com/docker/for-linux/issues/72#issuecomment-319904698
+ docker_exec(self.args, self.DOCKER_SIMULATOR_NAME, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'], capture=True)
+
+ if self.args.explain:
+ values = dict(
+ HOST=self.host,
+ PORT=str(self.port),
+ )
+ else:
+ credentials = self._get_credentials(self.DOCKER_SIMULATOR_NAME)
+
+ values = dict(
+ HOST=self.DOCKER_SIMULATOR_NAME,
+ PORT=str(self.port),
+ KEY=credentials['apikey'],
+ SECRET=credentials['secretkey'],
+ )
+
+ display.sensitive.add(values['SECRET'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _get_credentials(self, container_name: str) -> dict[str, t.Any]:
+ """Wait for the CloudStack simulator to return credentials."""
+ def check(value) -> bool:
+ """Return True if the given configuration is valid JSON, otherwise return False."""
+ # noinspection PyBroadException
+ try:
+ json.loads(value)
+ except Exception: # pylint: disable=broad-except
+ return False # sometimes the file exists but is not yet valid JSON
+
+ return True
+
+ stdout = wait_for_file(self.args, container_name, '/var/www/html/admin.json', sleep=10, tries=30, check=check)
+
+ return json.loads(stdout)
+
+
+class CsCloudEnvironment(CloudEnvironment):
+ """CloudStack cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ parser = configparser.ConfigParser()
+ parser.read(self.config_path)
+
+ config = dict(parser.items('default'))
+
+ env_vars = dict(
+ CLOUDSTACK_ENDPOINT=config['endpoint'],
+ CLOUDSTACK_KEY=config['key'],
+ CLOUDSTACK_SECRET=config['secret'],
+ CLOUDSTACK_TIMEOUT=config['timeout'],
+ )
+
+ display.sensitive.add(env_vars['CLOUDSTACK_SECRET'])
+
+ ansible_vars = dict(
+ cs_resource_prefix=self.resource_prefix,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/digitalocean.py b/test/lib/ansible_test/_internal/commands/integration/cloud/digitalocean.py
new file mode 100644
index 0000000..a46bf70
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/digitalocean.py
@@ -0,0 +1,55 @@
+"""DigitalOcean plugin for integration tests."""
+from __future__ import annotations
+
+import configparser
+
+from ....util import (
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class DigitalOceanCloudProvider(CloudProvider):
+    """Checks whether a configuration file has been provided or fixtures will be used for testing."""
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.uses_config = True
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ self._use_static_config()
+
+
+class DigitalOceanCloudEnvironment(CloudEnvironment):
+    """Updates the integration test environment after delegation using values from the config file."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ parser = configparser.ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ DO_API_KEY=parser.get('default', 'key'),
+ )
+
+ display.sensitive.add(env_vars['DO_API_KEY'])
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py b/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py
new file mode 100644
index 0000000..c2413ee
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/foreman.py
@@ -0,0 +1,94 @@
+"""Foreman plugin for integration tests."""
+from __future__ import annotations
+
+import os
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....containers import (
+ CleanupMode,
+ run_support_container,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class ForemanProvider(CloudProvider):
+    """Foreman plugin. Sets up a Foreman stub server for tests."""
+ DOCKER_SIMULATOR_NAME = 'foreman-stub'
+
+ # Default image to run Foreman stub from.
+ #
+ # The simulator must be pinned to a specific version
+ # to guarantee CI passes with the version used.
+ #
+    # Its source resides at:
+ # https://github.com/ansible/foreman-test-container
+ DOCKER_IMAGE = 'quay.io/ansible/foreman-test-container:1.4.0'
+
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.__container_from_env = os.environ.get('ANSIBLE_FRMNSIM_CONTAINER')
+ """
+        Overrides the target container; may be used for development.
+
+        Set ANSIBLE_FRMNSIM_CONTAINER=whatever_you_want to use
+        another image. Omit it or leave it empty otherwise.
+ """
+ self.image = self.__container_from_env or self.DOCKER_IMAGE
+
+ self.uses_docker = True
+
+ def setup(self) -> None:
+        """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def _setup_dynamic(self) -> None:
+        """Spawn a Foreman stub within a docker container."""
+ foreman_port = 8080
+
+ ports = [
+ foreman_port,
+ ]
+
+ run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_SIMULATOR_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=CleanupMode.YES,
+ )
+
+ self._set_cloud_config('FOREMAN_HOST', self.DOCKER_SIMULATOR_NAME)
+ self._set_cloud_config('FOREMAN_PORT', str(foreman_port))
+
+ def _setup_static(self) -> None:
+ raise NotImplementedError()
+
+
+class ForemanEnvironment(CloudEnvironment):
+ """Foreman environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ env_vars = dict(
+ FOREMAN_HOST=str(self._get_cloud_config('FOREMAN_HOST')),
+ FOREMAN_PORT=str(self._get_cloud_config('FOREMAN_PORT')),
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py b/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py
new file mode 100644
index 0000000..e180a02
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/galaxy.py
@@ -0,0 +1,168 @@
+"""Galaxy (ansible-galaxy) plugin for integration tests."""
+from __future__ import annotations
+
+import os
+import tempfile
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....docker_util import (
+ docker_cp_to,
+)
+
+from ....containers import (
+ run_support_container,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+# We add BasicAuthentication to make the tasks that access the API
+# directly easier to handle across galaxy_ng and pulp.
+SETTINGS = b'''
+CONTENT_ORIGIN = 'http://ansible-ci-pulp:80'
+ANSIBLE_API_HOSTNAME = 'http://ansible-ci-pulp:80'
+ANSIBLE_CONTENT_HOSTNAME = 'http://ansible-ci-pulp:80/pulp/content'
+TOKEN_AUTH_DISABLED = True
+GALAXY_REQUIRE_CONTENT_APPROVAL = False
+GALAXY_AUTHENTICATION_CLASSES = [
+ "rest_framework.authentication.SessionAuthentication",
+ "rest_framework.authentication.TokenAuthentication",
+ "rest_framework.authentication.BasicAuthentication",
+]
+'''
+
+SET_ADMIN_PASSWORD = b'''#!/usr/bin/execlineb -S0
+foreground {
+ redirfd -w 1 /dev/null
+ redirfd -w 2 /dev/null
+ export DJANGO_SETTINGS_MODULE pulpcore.app.settings
+ export PULP_CONTENT_ORIGIN localhost
+ s6-setuidgid postgres
+ if { /usr/local/bin/django-admin reset-admin-password --password password }
+ if { /usr/local/bin/pulpcore-manager create-group system:partner-engineers --users admin }
+}
+'''
+
+# There are 2 overrides here:
+# 1. Change the gunicorn bind address from 127.0.0.1 to 0.0.0.0, since Galaxy NG no longer allows us to access the
+#    Pulp API through it.
+# 2. Grant access allowing us to DELETE a namespace in Galaxy NG. This is needed because CI deletes and recreates
+#    repos and distributions in Pulp, which now breaks the namespace in Galaxy NG. Recreating it is the "simple"
+#    fix to get it working again.
+# These may not be needed in the future, especially if 1 becomes configurable by an env var, but for now they must
+# be done.
+OVERRIDES = b'''#!/usr/bin/execlineb -S0
+foreground {
+ sed -i "0,/\\"127.0.0.1:24817\\"/s//\\"0.0.0.0:24817\\"/" /etc/services.d/pulpcore-api/run
+}
+
+# This sed call changes the first occurrence to "allow", which is conveniently the delete operation for a namespace.
+# https://github.com/ansible/galaxy_ng/blob/master/galaxy_ng/app/access_control/statements/standalone.py#L9-L11.
+backtick NG_PREFIX { python -c "import galaxy_ng; print(galaxy_ng.__path__[0], end='')" }
+importas ng_prefix NG_PREFIX
+foreground {
+ sed -i "0,/\\"effect\\": \\"deny\\"/s//\\"effect\\": \\"allow\\"/" ${ng_prefix}/app/access_control/statements/standalone.py
+}'''
+
+
+class GalaxyProvider(CloudProvider):
+ """
+ Galaxy plugin. Sets up pulp (ansible-galaxy) servers for tests.
+ The pulp source itself resides at: https://github.com/pulp/pulp-oci-images
+ """
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+        # Cannot use the latest container image as either galaxy_ng 4.2.0rc2 or pulp 0.5.0 has sporadic issues with
+ # dropping published collections in CI. Try running the tests multiple times when updating. Will also need to
+ # comment out the cache tests in 'test/integration/targets/ansible-galaxy-collection/tasks/install.yml' when
+ # the newer update is available.
+ self.pulp = os.environ.get(
+ 'ANSIBLE_PULP_CONTAINER',
+ 'quay.io/ansible/pulp-galaxy-ng:b79a7be64eff'
+ )
+
+ self.uses_docker = True
+
+ def setup(self) -> None:
+        """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ galaxy_port = 80
+ pulp_host = 'ansible-ci-pulp'
+ pulp_port = 24817
+
+ ports = [
+ galaxy_port,
+ pulp_port,
+ ]
+
+        # Create the container but don't run it; we need to inject configs before it starts
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.pulp,
+ pulp_host,
+ ports,
+ start=False,
+ allow_existing=True,
+ )
+
+ if not descriptor:
+ return
+
+ if not descriptor.running:
+ pulp_id = descriptor.container_id
+
+ injected_files = {
+ '/etc/pulp/settings.py': SETTINGS,
+ '/etc/cont-init.d/111-postgres': SET_ADMIN_PASSWORD,
+ '/etc/cont-init.d/000-ansible-test-overrides': OVERRIDES,
+ }
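+            # copy each config file into the stopped container via a temporary file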
+ for path, content in injected_files.items():
+ with tempfile.NamedTemporaryFile() as temp_fd:
+ temp_fd.write(content)
+ temp_fd.flush()
+ docker_cp_to(self.args, pulp_id, temp_fd.name, path)
+
+ descriptor.start(self.args)
+
+ self._set_cloud_config('PULP_HOST', pulp_host)
+ self._set_cloud_config('PULP_PORT', str(pulp_port))
+ self._set_cloud_config('GALAXY_PORT', str(galaxy_port))
+ self._set_cloud_config('PULP_USER', 'admin')
+ self._set_cloud_config('PULP_PASSWORD', 'password')
+
+
+class GalaxyEnvironment(CloudEnvironment):
+ """Galaxy environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ pulp_user = str(self._get_cloud_config('PULP_USER'))
+ pulp_password = str(self._get_cloud_config('PULP_PASSWORD'))
+ pulp_host = self._get_cloud_config('PULP_HOST')
+ galaxy_port = self._get_cloud_config('GALAXY_PORT')
+ pulp_port = self._get_cloud_config('PULP_PORT')
+
+ return CloudEnvironmentConfig(
+ ansible_vars=dict(
+ pulp_user=pulp_user,
+ pulp_password=pulp_password,
+ pulp_api='http://%s:%s' % (pulp_host, pulp_port),
+ pulp_server='http://%s:%s/pulp_ansible/galaxy/' % (pulp_host, pulp_port),
+ galaxy_ng_server='http://%s:%s/api/galaxy/' % (pulp_host, galaxy_port),
+ ),
+ env_vars=dict(
+ PULP_USER=pulp_user,
+ PULP_PASSWORD=pulp_password,
+ PULP_SERVER='http://%s:%s/pulp_ansible/galaxy/api/' % (pulp_host, pulp_port),
+ GALAXY_NG_SERVER='http://%s:%s/api/galaxy/' % (pulp_host, galaxy_port),
+ ),
+ )
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/gcp.py b/test/lib/ansible_test/_internal/commands/integration/cloud/gcp.py
new file mode 100644
index 0000000..28ffb7b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/gcp.py
@@ -0,0 +1,55 @@
+# Copyright: (c) 2018, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""GCP plugin for integration tests."""
+from __future__ import annotations
+
+import configparser
+
+from ....util import (
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class GcpCloudProvider(CloudProvider):
+ """GCP cloud provider plugin. Sets up cloud resources before delegation."""
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.uses_config = True
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ if not self._use_static_config():
+ display.notice(
+                'Static configuration could not be used. Are you missing a template file?'
+ )
+
+
+class GcpCloudEnvironment(CloudEnvironment):
+ """GCP cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ parser = configparser.ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py b/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py
new file mode 100644
index 0000000..4d75f22
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py
@@ -0,0 +1,106 @@
+"""Hetzner Cloud plugin for integration tests."""
+from __future__ import annotations
+
+import configparser
+
+from ....util import (
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....target import (
+ IntegrationTarget,
+)
+
+from ....core_ci import (
+ AnsibleCoreCI,
+ CloudResource,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class HcloudCloudProvider(CloudProvider):
+ """Hetzner Cloud provider plugin. Sets up cloud resources before delegation."""
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.uses_config = True
+
+ def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
+ """Filter out the cloud tests when the necessary config and resources are not available."""
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ super().filter(targets, exclude)
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self) -> None:
+ """Request Hetzner credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+
+ aci = self._create_ansible_core_ci()
+
+ response = aci.start()
+
+ if not self.args.explain:
+ token = response['hetzner']['token']
+
+ display.sensitive.add(token)
+ display.info('Hetzner Cloud Token: %s' % token, verbosity=1)
+
+ values = dict(
+ TOKEN=token,
+ )
+
+ display.sensitive.add(values['TOKEN'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self) -> AnsibleCoreCI:
+ """Return a Heztner instance of AnsibleCoreCI."""
+ return AnsibleCoreCI(self.args, CloudResource(platform='hetzner'))
+
+
+class HcloudCloudEnvironment(CloudEnvironment):
+ """Hetzner Cloud cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ parser = configparser.ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ HCLOUD_TOKEN=parser.get('default', 'hcloud_api_token'),
+ )
+
+ display.sensitive.add(env_vars['HCLOUD_TOKEN'])
+
+ ansible_vars = dict(
+ hcloud_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
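
A note on the last step in HcloudCloudEnvironment: each environment variable is lowered to an Ansible variable of the same name. A runnable illustration with a placeholder token:

env_vars = dict(HCLOUD_TOKEN='example-token')  # placeholder, not a real token

ansible_vars = dict(hcloud_prefix='ansible-test-1234')
ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))

assert ansible_vars['hcloud_token'] == 'example-token'
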
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py b/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py
new file mode 100644
index 0000000..e250eed
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/httptester.py
@@ -0,0 +1,92 @@
+"""HTTP Tester plugin for integration tests."""
+from __future__ import annotations
+
+import os
+
+from ....util import (
+ display,
+ generate_password,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....containers import (
+ CleanupMode,
+ run_support_container,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+KRB5_PASSWORD_ENV = 'KRB5_PASSWORD'
+
+
+class HttptesterProvider(CloudProvider):
+ """HTTP Tester provider plugin. Sets up resources before delegation."""
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.image = os.environ.get('ANSIBLE_HTTP_TEST_CONTAINER', 'quay.io/ansible/http-test-container:2.1.0')
+
+ self.uses_docker = True
+
+ def setup(self) -> None:
+ """Setup resources before delegation."""
+ super().setup()
+
+ ports = [
+ 80,
+ 88,
+ 443,
+ 444,
+ 749,
+ ]
+
+ aliases = [
+ 'ansible.http.tests',
+ 'sni1.ansible.http.tests',
+ 'fail.ansible.http.tests',
+ 'self-signed.ansible.http.tests',
+ ]
+
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ 'http-test-container',
+ ports,
+ aliases=aliases,
+ allow_existing=True,
+ cleanup=CleanupMode.YES,
+ env={
+ KRB5_PASSWORD_ENV: generate_password(),
+ },
+ )
+
+ if not descriptor:
+ return
+
+ # Read the password from the container environment.
+ # This allows the tests to work when re-using an existing container.
+ # The password is marked as sensitive, since it may differ from the one we generated.
+ krb5_password = descriptor.details.container.env_dict()[KRB5_PASSWORD_ENV]
+ display.sensitive.add(krb5_password)
+
+ self._set_cloud_config(KRB5_PASSWORD_ENV, krb5_password)
+
+
+class HttptesterEnvironment(CloudEnvironment):
+ """HTTP Tester environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ return CloudEnvironmentConfig(
+ env_vars=dict(
+ HTTPTESTER='1', # backwards compatibility for tests intended to work with or without HTTP Tester
+ KRB5_PASSWORD=str(self._get_cloud_config(KRB5_PASSWORD_ENV)),
+ )
+ )
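
From the test side, the values published by HttptesterEnvironment arrive as ordinary environment variables. A hedged sketch of how a consuming test might read them; the actual consumption happens inside the integration tests themselves.

import os

if os.environ.get('HTTPTESTER') == '1':  # the container is available
    krb5_password = os.environ['KRB5_PASSWORD']
    # ... authenticate against the http-test-container using krb5_password ...
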
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/nios.py b/test/lib/ansible_test/_internal/commands/integration/cloud/nios.py
new file mode 100644
index 0000000..df0ebb0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/nios.py
@@ -0,0 +1,97 @@
+"""NIOS plugin for integration tests."""
+from __future__ import annotations
+
+import os
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....containers import (
+ CleanupMode,
+ run_support_container,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class NiosProvider(CloudProvider):
+ """Nios plugin. Sets up NIOS mock server for tests."""
+ DOCKER_SIMULATOR_NAME = 'nios-simulator'
+
+    # Default image to run the NIOS simulator.
+    #
+    # The simulator must be pinned to a specific version
+    # to guarantee CI passes with the version used.
+    #
+    # Its source code resides at:
+ # https://github.com/ansible/nios-test-container
+ DOCKER_IMAGE = 'quay.io/ansible/nios-test-container:1.4.0'
+
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.__container_from_env = os.environ.get('ANSIBLE_NIOSSIM_CONTAINER')
+ """
+        Overrides the target container; may be used for development.
+
+        Set ANSIBLE_NIOSSIM_CONTAINER=whatever_you_want to use
+        another image. Omit or leave it empty otherwise.
+ """
+
+ self.image = self.__container_from_env or self.DOCKER_IMAGE
+
+ self.uses_docker = True
+
+ def setup(self) -> None:
+ """Setup cloud resource before delegation and reg cleanup callback."""
+ super().setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def _setup_dynamic(self) -> None:
+ """Spawn a NIOS simulator within docker container."""
+ nios_port = 443
+
+ ports = [
+ nios_port,
+ ]
+
+ run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_SIMULATOR_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=CleanupMode.YES,
+ )
+
+ self._set_cloud_config('NIOS_HOST', self.DOCKER_SIMULATOR_NAME)
+
+ def _setup_static(self) -> None:
+ raise NotImplementedError()
+
+
+class NiosEnvironment(CloudEnvironment):
+ """NIOS environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ ansible_vars = dict(
+ nios_provider=dict(
+ host=self._get_cloud_config('NIOS_HOST'),
+ username='admin',
+ password='infoblox',
+ ),
+ )
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
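
One detail of the image override above: because the fallback uses 'or' rather than a default argument to os.environ.get, an empty ANSIBLE_NIOSSIM_CONTAINER also falls through to the pinned image, which is what the docstring's "omit or leave it empty" promises. A runnable check:

import os

DOCKER_IMAGE = 'quay.io/ansible/nios-test-container:1.4.0'

os.environ['ANSIBLE_NIOSSIM_CONTAINER'] = ''  # explicitly empty
image = os.environ.get('ANSIBLE_NIOSSIM_CONTAINER') or DOCKER_IMAGE

assert image == DOCKER_IMAGE  # the empty string falls through to the default
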
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/opennebula.py b/test/lib/ansible_test/_internal/commands/integration/cloud/opennebula.py
new file mode 100644
index 0000000..d005a3c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/opennebula.py
@@ -0,0 +1,60 @@
+"""OpenNebula plugin for integration tests."""
+from __future__ import annotations
+
+import configparser
+
+from ....util import (
+ display,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class OpenNebulaCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ self.uses_config = True
+
+ def _setup_dynamic(self) -> None:
+        display.info('No config file provided, will run tests from fixtures')
+
+ config = self._read_config_template()
+ values = dict(
+ URL="http://localhost/RPC2",
+ USERNAME='oneadmin',
+ PASSWORD='onepass',
+ FIXTURES='true',
+ REPLAY='true',
+ )
+ config = self._populate_config_template(config, values)
+ self._write_config(config)
+
+
+class OpenNebulaCloudEnvironment(CloudEnvironment):
+ """Updates integration test environment after delegation. Will setup the config file as parameter."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ parser = configparser.ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ display.sensitive.add(ansible_vars.get('opennebula_password'))
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
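
For context, _setup_dynamic above fills a config template with fixture values via helpers defined on the CloudProvider base class, which is outside this diff. A minimal sketch of the idea, assuming a simple '@KEY' placeholder convention; both the convention and the template contents are assumptions for illustration only.

def populate_config_template(template: str, values: dict[str, str]) -> str:
    # Assumed '@KEY' placeholders; the real helper is
    # CloudProvider._populate_config_template, which is not shown in this diff.
    for key in sorted(values):
        template = template.replace('@%s' % key, values[key])
    return template


config = populate_config_template(
    'opennebula_url: @URL\nopennebula_username: @USERNAME',
    dict(URL='http://localhost/RPC2', USERNAME='oneadmin'),
)
# config == 'opennebula_url: http://localhost/RPC2\nopennebula_username: oneadmin'
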
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/openshift.py b/test/lib/ansible_test/_internal/commands/integration/cloud/openshift.py
new file mode 100644
index 0000000..da930c0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/openshift.py
@@ -0,0 +1,114 @@
+"""OpenShift plugin for integration tests."""
+from __future__ import annotations
+
+import re
+
+from ....io import (
+ read_text_file,
+)
+
+from ....util import (
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....containers import (
+ CleanupMode,
+ run_support_container,
+ wait_for_file,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class OpenShiftCloudProvider(CloudProvider):
+ """OpenShift cloud provider plugin. Sets up cloud resources before delegation."""
+ DOCKER_CONTAINER_NAME = 'openshift-origin'
+
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args, config_extension='.kubeconfig')
+
+ # The image must be pinned to a specific version to guarantee CI passes with the version used.
+ self.image = 'quay.io/ansible/openshift-origin:v3.9.0'
+
+ self.uses_docker = True
+ self.uses_config = True
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def _setup_static(self) -> None:
+ """Configure OpenShift tests for use with static configuration."""
+ config = read_text_file(self.config_static_path)
+
+ match = re.search(r'^ *server: (?P<server>.*)$', config, flags=re.MULTILINE)
+
+ if not match:
+ display.warning('Could not find OpenShift endpoint in kubeconfig.')
+
+ def _setup_dynamic(self) -> None:
+ """Create a OpenShift container using docker."""
+ port = 8443
+
+ ports = [
+ port,
+ ]
+
+ cmd = ['start', 'master', '--listen', 'https://0.0.0.0:%d' % port]
+
+ descriptor = run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_CONTAINER_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=CleanupMode.YES,
+ cmd=cmd,
+ )
+
+ if not descriptor:
+ return
+
+ if self.args.explain:
+ config = '# Unknown'
+ else:
+ config = self._get_config(self.DOCKER_CONTAINER_NAME, 'https://%s:%s/' % (self.DOCKER_CONTAINER_NAME, port))
+
+ self._write_config(config)
+
+ def _get_config(self, container_name: str, server: str) -> str:
+ """Get OpenShift config from container."""
+ stdout = wait_for_file(self.args, container_name, '/var/lib/origin/openshift.local.config/master/admin.kubeconfig', sleep=10, tries=30)
+
+ config = stdout
+ config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
+ config = re.sub(r'^( *)server: .*$', r'\1server: %s' % server, config, flags=re.MULTILINE)
+
+ return config
+
+
+class OpenShiftCloudEnvironment(CloudEnvironment):
+ """OpenShift cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ env_vars = dict(
+ K8S_AUTH_KUBECONFIG=self.config_path,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ )
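
The two re.sub calls in _get_config are easiest to see with a worked example: certificate pinning is replaced by insecure-skip-tls-verify, and the server address is repointed at the named container. A runnable demonstration with a two-line kubeconfig fragment:

import re

config = (
    '    certificate-authority-data: abc123\n'
    '    server: https://127.0.0.1:8443/\n'
)
server = 'https://openshift-origin:8443/'

config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
config = re.sub(r'^( *)server: .*$', r'\1server: %s' % server, config, flags=re.MULTILINE)

assert config == (
    '    insecure-skip-tls-verify: true\n'
    '    server: https://openshift-origin:8443/\n'
)
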
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/scaleway.py b/test/lib/ansible_test/_internal/commands/integration/cloud/scaleway.py
new file mode 100644
index 0000000..04c2d89
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/scaleway.py
@@ -0,0 +1,56 @@
+"""Scaleway plugin for integration tests."""
+from __future__ import annotations
+
+import configparser
+
+from ....util import (
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class ScalewayCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.uses_config = True
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ self._use_static_config()
+
+
+class ScalewayCloudEnvironment(CloudEnvironment):
+ """Updates integration test environment after delegation. Will setup the config file as parameter."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ parser = configparser.ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ SCW_API_KEY=parser.get('default', 'key'),
+ SCW_ORG=parser.get('default', 'org')
+ )
+
+ display.sensitive.add(env_vars['SCW_API_KEY'])
+
+ ansible_vars = dict(
+ scw_org=parser.get('default', 'org'),
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py b/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py
new file mode 100644
index 0000000..df1651f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/vcenter.py
@@ -0,0 +1,138 @@
+"""VMware vCenter plugin for integration tests."""
+from __future__ import annotations
+
+import configparser
+import os
+
+from ....util import (
+ ApplicationError,
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from ....containers import (
+ CleanupMode,
+ run_support_container,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class VcenterProvider(CloudProvider):
+ """VMware vcenter/esx plugin. Sets up cloud resources for tests."""
+ DOCKER_SIMULATOR_NAME = 'vcenter-simulator'
+
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ if os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_VCSIM_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/vcenter-test-container:1.7.0'
+
+ # VMware tests can be run on govcsim or BYO with a static config file.
+ # The simulator is the default if no config is provided.
+ self.vmware_test_platform = os.environ.get('VMWARE_TEST_PLATFORM', 'govcsim')
+
+ if self.vmware_test_platform == 'govcsim':
+ self.uses_docker = True
+ self.uses_config = False
+ elif self.vmware_test_platform == 'static':
+ self.uses_docker = False
+ self.uses_config = True
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ self._set_cloud_config('vmware_test_platform', self.vmware_test_platform)
+
+ if self.vmware_test_platform == 'govcsim':
+ self._setup_dynamic_simulator()
+ self.managed = True
+ elif self.vmware_test_platform == 'static':
+ self._use_static_config()
+ self._setup_static()
+ else:
+ raise ApplicationError('Unknown vmware_test_platform: %s' % self.vmware_test_platform)
+
+ def _setup_dynamic_simulator(self) -> None:
+ """Create a vcenter simulator using docker."""
+ ports = [
+ 443,
+ 8080,
+ 8989,
+ 5000, # control port for flask app in simulator
+ ]
+
+ run_support_container(
+ self.args,
+ self.platform,
+ self.image,
+ self.DOCKER_SIMULATOR_NAME,
+ ports,
+ allow_existing=True,
+ cleanup=CleanupMode.YES,
+ )
+
+ self._set_cloud_config('vcenter_hostname', self.DOCKER_SIMULATOR_NAME)
+
+ def _setup_static(self) -> None:
+ if not os.path.exists(self.config_static_path):
+ raise ApplicationError('Configuration file does not exist: %s' % self.config_static_path)
+
+
+class VcenterEnvironment(CloudEnvironment):
+ """VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ try:
+            # We may be in a container, so we cannot simply check VMWARE_TEST_PLATFORM;
+            # we use a try/except instead.
+ parser = configparser.ConfigParser()
+ parser.read(self.config_path) # static
+
+ env_vars = {}
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+ ansible_vars.update(dict(parser.items('DEFAULT', raw=True)))
+ except KeyError: # govcsim
+ env_vars = dict(
+ VCENTER_HOSTNAME=str(self._get_cloud_config('vcenter_hostname')),
+ VCENTER_USERNAME='user',
+ VCENTER_PASSWORD='pass',
+ )
+
+ ansible_vars = dict(
+ vcsim=str(self._get_cloud_config('vcenter_hostname')),
+ vcenter_hostname=str(self._get_cloud_config('vcenter_hostname')),
+ vcenter_username='user',
+ vcenter_password='pass',
+ )
+
+ for key, value in ansible_vars.items():
+ if key.endswith('_password'):
+ display.sensitive.add(value)
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ module_defaults={
+ 'group/vmware': {
+ 'hostname': ansible_vars['vcenter_hostname'],
+ 'username': ansible_vars['vcenter_username'],
+ 'password': ansible_vars['vcenter_password'],
+ 'port': ansible_vars.get('vcenter_port', '443'),
+ 'validate_certs': ansible_vars.get('vmware_validate_certs', 'no'),
+ },
+ },
+ )
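
The module_defaults block above centralizes connection parameters for every module in the 'group/vmware' action group, with .get() supplying fallbacks when the static config omits a port or certificate setting. A small sketch of that fallback behavior; the hostname is a placeholder.

ansible_vars = dict(
    vcenter_hostname='vcenter.example.com',  # placeholder host
    vcenter_username='user',
    vcenter_password='pass',
)

module_defaults = {
    'group/vmware': {
        'hostname': ansible_vars['vcenter_hostname'],
        'port': ansible_vars.get('vcenter_port', '443'),                    # default port
        'validate_certs': ansible_vars.get('vmware_validate_certs', 'no'),  # default off
    },
}

assert module_defaults['group/vmware']['port'] == '443'
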
diff --git a/test/lib/ansible_test/_internal/commands/integration/cloud/vultr.py b/test/lib/ansible_test/_internal/commands/integration/cloud/vultr.py
new file mode 100644
index 0000000..1993cda
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/cloud/vultr.py
@@ -0,0 +1,55 @@
+"""Vultr plugin for integration tests."""
+from __future__ import annotations
+
+import configparser
+
+from ....util import (
+ display,
+)
+
+from ....config import (
+ IntegrationConfig,
+)
+
+from . import (
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+ CloudProvider,
+)
+
+
+class VultrCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+ def __init__(self, args: IntegrationConfig) -> None:
+ super().__init__(args)
+
+ self.uses_config = True
+
+ def setup(self) -> None:
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super().setup()
+
+ self._use_static_config()
+
+
+class VultrCloudEnvironment(CloudEnvironment):
+ """Updates integration test environment after delegation. Will setup the config file as parameter."""
+ def get_environment_config(self) -> CloudEnvironmentConfig:
+ """Return environment configuration for use in the test environment after delegation."""
+ parser = configparser.ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ VULTR_API_KEY=parser.get('default', 'key'),
+ )
+
+ display.sensitive.add(env_vars['VULTR_API_KEY'])
+
+ ansible_vars = dict(
+ vultr_resource_prefix=self.resource_prefix,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/commands/integration/coverage.py b/test/lib/ansible_test/_internal/commands/integration/coverage.py
new file mode 100644
index 0000000..5a486e9
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/coverage.py
@@ -0,0 +1,417 @@
+"""Code coverage support for integration tests."""
+from __future__ import annotations
+
+import abc
+import os
+import shutil
+import tempfile
+import typing as t
+import zipfile
+
+from ...io import (
+ write_text_file,
+)
+
+from ...ansible_util import (
+ run_playbook,
+)
+
+from ...config import (
+ IntegrationConfig,
+)
+
+from ...util import (
+ COVERAGE_CONFIG_NAME,
+ MODE_DIRECTORY,
+ MODE_DIRECTORY_WRITE,
+ MODE_FILE,
+ SubprocessError,
+ cache,
+ display,
+ generate_name,
+ get_generic_type,
+ get_type_map,
+ remove_tree,
+ sanitize_host_name,
+ verified_chmod,
+)
+
+from ...util_common import (
+ ResultType,
+)
+
+from ...coverage_util import (
+ generate_coverage_config,
+ get_coverage_platform,
+)
+
+from ...host_configs import (
+ HostConfig,
+ PosixConfig,
+ WindowsConfig,
+ WindowsInventoryConfig,
+ WindowsRemoteConfig,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...host_profiles import (
+ ControllerProfile,
+ HostProfile,
+ PosixProfile,
+ SshTargetHostProfile,
+)
+
+from ...provisioning import (
+ HostState,
+)
+
+from ...connections import (
+ LocalConnection,
+)
+
+from ...inventory import (
+ create_windows_inventory,
+ create_posix_inventory,
+)
+
+THostConfig = t.TypeVar('THostConfig', bound=HostConfig)
+
+
+class CoverageHandler(t.Generic[THostConfig], metaclass=abc.ABCMeta):
+ """Base class for configuring hosts for integration test code coverage."""
+ def __init__(self, args: IntegrationConfig, host_state: HostState, inventory_path: str) -> None:
+ self.args = args
+ self.host_state = host_state
+ self.inventory_path = inventory_path
+ self.profiles = self.get_profiles()
+
+ def get_profiles(self) -> list[HostProfile]:
+ """Return a list of profiles relevant for this handler."""
+ profile_type = get_generic_type(type(self), HostConfig)
+ profiles = [profile for profile in self.host_state.target_profiles if isinstance(profile.config, profile_type)]
+
+ return profiles
+
+ @property
+ @abc.abstractmethod
+ def is_active(self) -> bool:
+ """True if the handler should be used, otherwise False."""
+
+ @abc.abstractmethod
+ def setup(self) -> None:
+ """Perform setup for code coverage."""
+
+ @abc.abstractmethod
+ def teardown(self) -> None:
+ """Perform teardown for code coverage."""
+
+ @abc.abstractmethod
+ def create_inventory(self) -> None:
+ """Create inventory, if needed."""
+
+ @abc.abstractmethod
+ def get_environment(self, target_name: str, aliases: tuple[str, ...]) -> dict[str, str]:
+ """Return a dictionary of environment variables for running tests with code coverage."""
+
+ def run_playbook(self, playbook: str, variables: dict[str, str]) -> None:
+ """Run the specified playbook using the current inventory."""
+ self.create_inventory()
+ run_playbook(self.args, self.inventory_path, playbook, capture=False, variables=variables)
+
+
+class PosixCoverageHandler(CoverageHandler[PosixConfig]):
+ """Configure integration test code coverage for POSIX hosts."""
+ def __init__(self, args: IntegrationConfig, host_state: HostState, inventory_path: str) -> None:
+ super().__init__(args, host_state, inventory_path)
+
+        # Common temporary directory used on all POSIX hosts; it will be created world-writable.
+ self.common_temp_path = f'/tmp/ansible-test-{generate_name()}'
+
+ def get_profiles(self) -> list[HostProfile]:
+ """Return a list of profiles relevant for this handler."""
+ profiles = super().get_profiles()
+ profiles = [profile for profile in profiles if not isinstance(profile, ControllerProfile) or
+ profile.python.path != self.host_state.controller_profile.python.path]
+
+ return profiles
+
+ @property
+ def is_active(self) -> bool:
+ """True if the handler should be used, otherwise False."""
+ return True
+
+ @property
+ def target_profile(self) -> t.Optional[PosixProfile]:
+ """The POSIX target profile, if it uses a different Python interpreter than the controller, otherwise None."""
+ return t.cast(PosixProfile, self.profiles[0]) if self.profiles else None
+
+ def setup(self) -> None:
+ """Perform setup for code coverage."""
+ self.setup_controller()
+ self.setup_target()
+
+ def teardown(self) -> None:
+ """Perform teardown for code coverage."""
+ self.teardown_controller()
+ self.teardown_target()
+
+ def setup_controller(self) -> None:
+ """Perform setup for code coverage on the controller."""
+ coverage_config_path = os.path.join(self.common_temp_path, COVERAGE_CONFIG_NAME)
+ coverage_output_path = os.path.join(self.common_temp_path, ResultType.COVERAGE.name)
+
+ coverage_config = generate_coverage_config(self.args)
+
+ write_text_file(coverage_config_path, coverage_config, create_directories=True)
+
+ verified_chmod(coverage_config_path, MODE_FILE)
+ os.mkdir(coverage_output_path)
+ verified_chmod(coverage_output_path, MODE_DIRECTORY_WRITE)
+
+ def setup_target(self) -> None:
+ """Perform setup for code coverage on the target."""
+ if not self.target_profile:
+ return
+
+ if isinstance(self.target_profile, ControllerProfile):
+ return
+
+ self.run_playbook('posix_coverage_setup.yml', self.get_playbook_variables())
+
+ def teardown_controller(self) -> None:
+ """Perform teardown for code coverage on the controller."""
+ coverage_temp_path = os.path.join(self.common_temp_path, ResultType.COVERAGE.name)
+ platform = get_coverage_platform(self.args.controller)
+
+ for filename in os.listdir(coverage_temp_path):
+ shutil.copyfile(os.path.join(coverage_temp_path, filename), os.path.join(ResultType.COVERAGE.path, update_coverage_filename(filename, platform)))
+
+ remove_tree(self.common_temp_path)
+
+ def teardown_target(self) -> None:
+ """Perform teardown for code coverage on the target."""
+ if not self.target_profile:
+ return
+
+ if isinstance(self.target_profile, ControllerProfile):
+ return
+
+ profile = t.cast(SshTargetHostProfile, self.target_profile)
+ platform = get_coverage_platform(profile.config)
+ con = profile.get_controller_target_connections()[0]
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-test-coverage-', suffix='.tgz') as coverage_tgz:
+ try:
+ con.create_archive(chdir=self.common_temp_path, name=ResultType.COVERAGE.name, dst=coverage_tgz)
+ except SubprocessError as ex:
+ display.warning(f'Failed to download coverage results: {ex}')
+ else:
+ coverage_tgz.seek(0)
+
+ with tempfile.TemporaryDirectory() as temp_dir:
+ local_con = LocalConnection(self.args)
+ local_con.extract_archive(chdir=temp_dir, src=coverage_tgz)
+
+ base_dir = os.path.join(temp_dir, ResultType.COVERAGE.name)
+
+ for filename in os.listdir(base_dir):
+ shutil.copyfile(os.path.join(base_dir, filename), os.path.join(ResultType.COVERAGE.path, update_coverage_filename(filename, platform)))
+
+ self.run_playbook('posix_coverage_teardown.yml', self.get_playbook_variables())
+
+ def get_environment(self, target_name: str, aliases: tuple[str, ...]) -> dict[str, str]:
+ """Return a dictionary of environment variables for running tests with code coverage."""
+
+ # Enable code coverage collection on Ansible modules (both local and remote).
+ # Used by the AnsiballZ wrapper generator in lib/ansible/executor/module_common.py to support code coverage.
+ config_file = os.path.join(self.common_temp_path, COVERAGE_CONFIG_NAME)
+
+ # Include the command, target and platform marker so the remote host can create a filename with that info.
+ # The generated AnsiballZ wrapper is responsible for adding '=python-{X.Y}=coverage.{hostname}.{pid}.{id}'
+ coverage_file = os.path.join(self.common_temp_path, ResultType.COVERAGE.name, '='.join((self.args.command, target_name, 'platform')))
+
+ if self.args.coverage_check:
+            # Cause the 'coverage' module to be found, but not imported or enabled.
+ coverage_file = ''
+
+ variables = dict(
+ _ANSIBLE_COVERAGE_CONFIG=config_file,
+ _ANSIBLE_COVERAGE_OUTPUT=coverage_file,
+ )
+
+ return variables
+
+ def create_inventory(self) -> None:
+ """Create inventory."""
+ create_posix_inventory(self.args, self.inventory_path, self.host_state.target_profiles)
+
+ def get_playbook_variables(self) -> dict[str, str]:
+ """Return a dictionary of variables for setup and teardown of POSIX coverage."""
+ return dict(
+ common_temp_dir=self.common_temp_path,
+ coverage_config=generate_coverage_config(self.args),
+ coverage_config_path=os.path.join(self.common_temp_path, COVERAGE_CONFIG_NAME),
+ coverage_output_path=os.path.join(self.common_temp_path, ResultType.COVERAGE.name),
+ mode_directory=f'{MODE_DIRECTORY:04o}',
+ mode_directory_write=f'{MODE_DIRECTORY_WRITE:04o}',
+ mode_file=f'{MODE_FILE:04o}',
+ )
+
+
+class WindowsCoverageHandler(CoverageHandler[WindowsConfig]):
+ """Configure integration test code coverage for Windows hosts."""
+ def __init__(self, args: IntegrationConfig, host_state: HostState, inventory_path: str) -> None:
+ super().__init__(args, host_state, inventory_path)
+
+        # Common temporary directory used on all Windows hosts; it will be created writable by everyone.
+ self.remote_temp_path = f'C:\\ansible_test_coverage_{generate_name()}'
+
+ @property
+ def is_active(self) -> bool:
+ """True if the handler should be used, otherwise False."""
+ return bool(self.profiles) and not self.args.coverage_check
+
+ def setup(self) -> None:
+ """Perform setup for code coverage."""
+ self.run_playbook('windows_coverage_setup.yml', self.get_playbook_variables())
+
+ def teardown(self) -> None:
+ """Perform teardown for code coverage."""
+ with tempfile.TemporaryDirectory() as local_temp_path:
+ variables = self.get_playbook_variables()
+ variables.update(
+ local_temp_path=local_temp_path,
+ )
+
+ self.run_playbook('windows_coverage_teardown.yml', variables)
+
+ for filename in os.listdir(local_temp_path):
+ if all(isinstance(profile.config, WindowsRemoteConfig) for profile in self.profiles):
+ prefix = 'remote'
+ elif all(isinstance(profile.config, WindowsInventoryConfig) for profile in self.profiles):
+ prefix = 'inventory'
+ else:
+ raise NotImplementedError()
+
+ platform = f'{prefix}-{sanitize_host_name(os.path.splitext(filename)[0])}'
+
+ with zipfile.ZipFile(os.path.join(local_temp_path, filename)) as coverage_zip:
+ for item in coverage_zip.infolist():
+ if item.is_dir():
+ raise Exception(f'Unexpected directory in zip file: {item.filename}')
+
+ item.filename = update_coverage_filename(item.filename, platform)
+
+ coverage_zip.extract(item, ResultType.COVERAGE.path)
+
+ def get_environment(self, target_name: str, aliases: tuple[str, ...]) -> dict[str, str]:
+ """Return a dictionary of environment variables for running tests with code coverage."""
+
+ # Include the command, target and platform marker so the remote host can create a filename with that info.
+ # The remote is responsible for adding '={language-version}=coverage.{hostname}.{pid}.{id}'
+ coverage_name = '='.join((self.args.command, target_name, 'platform'))
+
+ variables = dict(
+ _ANSIBLE_COVERAGE_REMOTE_OUTPUT=os.path.join(self.remote_temp_path, coverage_name),
+ _ANSIBLE_COVERAGE_REMOTE_PATH_FILTER=os.path.join(data_context().content.root, '*'),
+ )
+
+ return variables
+
+ def create_inventory(self) -> None:
+ """Create inventory."""
+ create_windows_inventory(self.args, self.inventory_path, self.host_state.target_profiles)
+
+ def get_playbook_variables(self) -> dict[str, str]:
+ """Return a dictionary of variables for setup and teardown of Windows coverage."""
+ return dict(
+ remote_temp_path=self.remote_temp_path,
+ )
+
+
+class CoverageManager:
+ """Manager for code coverage configuration and state."""
+ def __init__(self, args: IntegrationConfig, host_state: HostState, inventory_path: str) -> None:
+ self.args = args
+ self.host_state = host_state
+ self.inventory_path = inventory_path
+
+ if self.args.coverage:
+ handler_types = set(get_handler_type(type(profile.config)) for profile in host_state.profiles)
+ handler_types.discard(None)
+ else:
+ handler_types = set()
+
+ handlers = [handler_type(args=args, host_state=host_state, inventory_path=inventory_path) for handler_type in handler_types]
+
+ self.handlers = [handler for handler in handlers if handler.is_active]
+
+ def setup(self) -> None:
+ """Perform setup for code coverage."""
+ if not self.args.coverage:
+ return
+
+ for handler in self.handlers:
+ handler.setup()
+
+ def teardown(self) -> None:
+ """Perform teardown for code coverage."""
+ if not self.args.coverage:
+ return
+
+ for handler in self.handlers:
+ handler.teardown()
+
+ def get_environment(self, target_name: str, aliases: tuple[str, ...]) -> dict[str, str]:
+ """Return a dictionary of environment variables for running tests with code coverage."""
+ if not self.args.coverage or 'non_local/' in aliases:
+ return {}
+
+ env = {}
+
+ for handler in self.handlers:
+ env.update(handler.get_environment(target_name, aliases))
+
+ return env
+
+
+@cache
+def get_config_handler_type_map() -> dict[t.Type[HostConfig], t.Type[CoverageHandler]]:
+ """Create and return a mapping of HostConfig types to CoverageHandler types."""
+ return get_type_map(CoverageHandler, HostConfig)
+
+
+def get_handler_type(config_type: t.Type[HostConfig]) -> t.Optional[t.Type[CoverageHandler]]:
+ """Return the coverage handler type associated with the given host config type if found, otherwise return None."""
+ queue = [config_type]
+ type_map = get_config_handler_type_map()
+
+ while queue:
+ config_type = queue.pop(0)
+ handler_type = type_map.get(config_type)
+
+ if handler_type:
+ return handler_type
+
+ queue.extend(config_type.__bases__)
+
+ return None
+
+
+def update_coverage_filename(original_filename: str, platform: str) -> str:
+ """Validate the given filename and insert the specified platform, then return the result."""
+ parts = original_filename.split('=')
+
+ if original_filename != os.path.basename(original_filename) or len(parts) != 5 or parts[2] != 'platform':
+ raise Exception(f'Unexpected coverage filename: {original_filename}')
+
+ parts[2] = platform
+
+ updated_filename = '='.join(parts)
+
+ display.info(f'Coverage file for platform "{platform}": {original_filename} -> {updated_filename}', verbosity=3)
+
+ return updated_filename
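
A worked example of the filename handling above, using the layout documented in get_environment (command, target and a platform marker, with the language version and coverage suffix appended on the remote host); the hostname, pid and id values are hypothetical:

original = 'integration=ping=platform=python-3.10=coverage.web.1234.5678'

parts = original.split('=')    # five '='-separated fields
assert parts[2] == 'platform'  # the marker inserted by get_environment

parts[2] = 'docker-default'    # hypothetical platform string
updated = '='.join(parts)

assert updated == 'integration=ping=docker-default=python-3.10=coverage.web.1234.5678'
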
diff --git a/test/lib/ansible_test/_internal/commands/integration/filters.py b/test/lib/ansible_test/_internal/commands/integration/filters.py
new file mode 100644
index 0000000..be03d7f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/filters.py
@@ -0,0 +1,279 @@
+"""Logic for filtering out integration test targets which are unsupported for the currently provided arguments and available hosts."""
+from __future__ import annotations
+
+import abc
+import typing as t
+
+from ...config import (
+ IntegrationConfig,
+)
+
+from ...util import (
+ cache,
+ detect_architecture,
+ display,
+ get_type_map,
+)
+
+from ...target import (
+ IntegrationTarget,
+)
+
+from ...host_configs import (
+ ControllerConfig,
+ DockerConfig,
+ FallbackReason,
+ HostConfig,
+ NetworkInventoryConfig,
+ NetworkRemoteConfig,
+ OriginConfig,
+ PosixConfig,
+ PosixRemoteConfig,
+ PosixSshConfig,
+ RemoteConfig,
+ WindowsInventoryConfig,
+ WindowsRemoteConfig,
+)
+
+from ...host_profiles import (
+ HostProfile,
+)
+
+THostConfig = t.TypeVar('THostConfig', bound=HostConfig)
+TPosixConfig = t.TypeVar('TPosixConfig', bound=PosixConfig)
+TRemoteConfig = t.TypeVar('TRemoteConfig', bound=RemoteConfig)
+THostProfile = t.TypeVar('THostProfile', bound=HostProfile)
+
+
+class TargetFilter(t.Generic[THostConfig], metaclass=abc.ABCMeta):
+ """Base class for target filters."""
+ def __init__(self, args: IntegrationConfig, configs: list[THostConfig], controller: bool) -> None:
+ self.args = args
+ self.configs = configs
+ self.controller = controller
+ self.host_type = 'controller' if controller else 'target'
+
+ # values which are not host specific
+ self.include_targets = args.include
+ self.allow_root = args.allow_root
+ self.allow_destructive = args.allow_destructive
+
+ @property
+ def config(self) -> THostConfig:
+ """The configuration to filter. Only valid when there is a single config."""
+ if len(self.configs) != 1:
+ raise Exception()
+
+ return self.configs[0]
+
+ def skip(
+ self,
+ skip: str,
+ reason: str,
+ targets: list[IntegrationTarget],
+ exclude: set[str],
+ override: t.Optional[list[str]] = None,
+ ) -> None:
+ """Apply the specified skip rule to the given targets by updating the provided exclude list."""
+ if skip.startswith('skip/'):
+ skipped = [target.name for target in targets if skip in target.skips and (not override or target.name not in override)]
+ else:
+ skipped = [target.name for target in targets if f'{skip}/' in target.aliases and (not override or target.name not in override)]
+
+ self.apply_skip(f'"{skip}"', reason, skipped, exclude)
+
+ def apply_skip(self, marked: str, reason: str, skipped: list[str], exclude: set[str]) -> None:
+ """Apply the provided skips to the given exclude list."""
+ if not skipped:
+ return
+
+ exclude.update(skipped)
+ display.warning(f'Excluding {self.host_type} tests marked {marked} {reason}: {", ".join(skipped)}')
+
+ def filter_profiles(self, profiles: list[THostProfile], target: IntegrationTarget) -> list[THostProfile]:
+ """Filter the list of profiles, returning only those which are not skipped for the given target."""
+ del target
+ return profiles
+
+ def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
+ """Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
+ if self.controller and self.args.host_settings.controller_fallback and targets:
+ affected_targets = [target.name for target in targets]
+ reason = self.args.host_settings.controller_fallback.reason
+
+ if reason == FallbackReason.ENVIRONMENT:
+ exclude.update(affected_targets)
+ display.warning(f'Excluding {self.host_type} tests since a fallback controller is in use: {", ".join(affected_targets)}')
+ elif reason == FallbackReason.PYTHON:
+ display.warning(f'Some {self.host_type} tests may be redundant since a fallback python is in use: {", ".join(affected_targets)}')
+
+ if not self.allow_destructive and not self.config.is_managed:
+ override_destructive = set(target for target in self.include_targets if target.startswith('destructive/'))
+ override = [target.name for target in targets if override_destructive & set(target.aliases)]
+
+ self.skip('destructive', 'which require --allow-destructive or prefixing with "destructive/" to run on unmanaged hosts', targets, exclude, override)
+
+ if not self.args.allow_disabled:
+ override_disabled = set(target for target in self.args.include if target.startswith('disabled/'))
+ override = [target.name for target in targets if override_disabled & set(target.aliases)]
+
+ self.skip('disabled', 'which require --allow-disabled or prefixing with "disabled/"', targets, exclude, override)
+
+ if not self.args.allow_unsupported:
+ override_unsupported = set(target for target in self.args.include if target.startswith('unsupported/'))
+ override = [target.name for target in targets if override_unsupported & set(target.aliases)]
+
+ self.skip('unsupported', 'which require --allow-unsupported or prefixing with "unsupported/"', targets, exclude, override)
+
+ if not self.args.allow_unstable:
+ override_unstable = set(target for target in self.args.include if target.startswith('unstable/'))
+
+ if self.args.allow_unstable_changed:
+ override_unstable |= set(self.args.metadata.change_description.focused_targets or [])
+
+ override = [target.name for target in targets if override_unstable & set(target.aliases)]
+
+ self.skip('unstable', 'which require --allow-unstable or prefixing with "unstable/"', targets, exclude, override)
+
+
+class PosixTargetFilter(TargetFilter[TPosixConfig]):
+ """Target filter for POSIX hosts."""
+ def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
+ """Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
+ super().filter_targets(targets, exclude)
+
+ if not self.allow_root and not self.config.have_root:
+ self.skip('needs/root', 'which require --allow-root or running as root', targets, exclude)
+
+ self.skip(f'skip/python{self.config.python.version}', f'which are not supported by Python {self.config.python.version}', targets, exclude)
+ self.skip(f'skip/python{self.config.python.major_version}', f'which are not supported by Python {self.config.python.major_version}', targets, exclude)
+
+
+class DockerTargetFilter(PosixTargetFilter[DockerConfig]):
+ """Target filter for docker hosts."""
+ def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
+ """Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
+ super().filter_targets(targets, exclude)
+
+ self.skip('skip/docker', 'which cannot run under docker', targets, exclude)
+
+ if not self.config.privileged:
+ self.skip('needs/privileged', 'which require --docker-privileged to run under docker', targets, exclude)
+
+
+class PosixSshTargetFilter(PosixTargetFilter[PosixSshConfig]):
+ """Target filter for POSIX SSH hosts."""
+
+
+class RemoteTargetFilter(TargetFilter[TRemoteConfig]):
+ """Target filter for remote Ansible Core CI managed hosts."""
+ def filter_profiles(self, profiles: list[THostProfile], target: IntegrationTarget) -> list[THostProfile]:
+ """Filter the list of profiles, returning only those which are not skipped for the given target."""
+ profiles = super().filter_profiles(profiles, target)
+
+ skipped_profiles = [profile for profile in profiles if any(skip in target.skips for skip in get_remote_skip_aliases(profile.config))]
+
+ if skipped_profiles:
+ configs: list[TRemoteConfig] = [profile.config for profile in skipped_profiles]
+ display.warning(f'Excluding skipped hosts from inventory: {", ".join(config.name for config in configs)}')
+
+ profiles = [profile for profile in profiles if profile not in skipped_profiles]
+
+ return profiles
+
+ def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
+ """Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
+ super().filter_targets(targets, exclude)
+
+ if len(self.configs) > 1:
+ host_skips = {host.name: get_remote_skip_aliases(host) for host in self.configs}
+
+ # Skip only targets which skip all hosts.
+ # Targets that skip only some hosts will be handled during inventory generation.
+ skipped = [target.name for target in targets if all(any(skip in target.skips for skip in skips) for skips in host_skips.values())]
+
+ if skipped:
+ exclude.update(skipped)
+ display.warning(f'Excluding tests which do not support {", ".join(host_skips.keys())}: {", ".join(skipped)}')
+ else:
+ skips = get_remote_skip_aliases(self.config)
+
+ for skip, reason in skips.items():
+ self.skip(skip, reason, targets, exclude)
+
+
+class PosixRemoteTargetFilter(PosixTargetFilter[PosixRemoteConfig], RemoteTargetFilter[PosixRemoteConfig]):
+ """Target filter for POSIX remote hosts."""
+
+
+class WindowsRemoteTargetFilter(RemoteTargetFilter[WindowsRemoteConfig]):
+ """Target filter for remote Windows hosts."""
+
+
+class WindowsInventoryTargetFilter(TargetFilter[WindowsInventoryConfig]):
+ """Target filter for Windows inventory."""
+
+
+class NetworkRemoteTargetFilter(RemoteTargetFilter[NetworkRemoteConfig]):
+ """Target filter for remote network hosts."""
+
+
+class NetworkInventoryTargetFilter(TargetFilter[NetworkInventoryConfig]):
+ """Target filter for network inventory."""
+
+
+class OriginTargetFilter(PosixTargetFilter[OriginConfig]):
+ """Target filter for localhost."""
+ def filter_targets(self, targets: list[IntegrationTarget], exclude: set[str]) -> None:
+ """Filter the list of targets, adding any which this host profile cannot support to the provided exclude list."""
+ super().filter_targets(targets, exclude)
+
+ arch = detect_architecture(self.config.python.path)
+
+ if arch:
+ self.skip(f'skip/{arch}', f'which are not supported by {arch}', targets, exclude)
+
+
+@cache
+def get_host_target_type_map() -> dict[t.Type[HostConfig], t.Type[TargetFilter]]:
+ """Create and return a mapping of HostConfig types to TargetFilter types."""
+ return get_type_map(TargetFilter, HostConfig)
+
+
+def get_target_filter(args: IntegrationConfig, configs: list[HostConfig], controller: bool) -> TargetFilter:
+ """Return an integration test target filter instance for the provided host configurations."""
+ target_type = type(configs[0])
+
+ if issubclass(target_type, ControllerConfig):
+ target_type = type(args.controller)
+ configs = [args.controller]
+
+ filter_type = get_host_target_type_map()[target_type]
+ filter_instance = filter_type(args, configs, controller)
+
+ return filter_instance
+
+
+def get_remote_skip_aliases(config: RemoteConfig) -> dict[str, str]:
+ """Return a dictionary of skip aliases and the reason why they apply."""
+ return get_platform_skip_aliases(config.platform, config.version, config.arch)
+
+
+def get_platform_skip_aliases(platform: str, version: str, arch: t.Optional[str]) -> dict[str, str]:
+ """Return a dictionary of skip aliases and the reason why they apply."""
+ skips = {
+ f'skip/{platform}': platform,
+ f'skip/{platform}/{version}': f'{platform} {version}',
+ f'skip/{platform}{version}': f'{platform} {version}', # legacy syntax, use above format
+ }
+
+ if arch:
+ skips.update({
+ f'skip/{arch}': arch,
+ f'skip/{arch}/{platform}': f'{platform} on {arch}',
+ f'skip/{arch}/{platform}/{version}': f'{platform} {version} on {arch}',
+ })
+
+ skips = {alias: f'which are not supported by {description}' for alias, description in skips.items()}
+
+ return skips
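
A worked example of get_platform_skip_aliases, relying on the function defined just above; the platform, version and arch values are hypothetical. It shows the aliases a target can carry to be skipped on that host:

skips = get_platform_skip_aliases('rhel', '9.0', 'x86_64')

# Produces (alias -> reason):
#   skip/rhel              -> which are not supported by rhel
#   skip/rhel/9.0          -> which are not supported by rhel 9.0
#   skip/rhel9.0           -> which are not supported by rhel 9.0  (legacy syntax)
#   skip/x86_64            -> which are not supported by x86_64
#   skip/x86_64/rhel       -> which are not supported by rhel on x86_64
#   skip/x86_64/rhel/9.0   -> which are not supported by rhel 9.0 on x86_64

assert skips['skip/rhel'] == 'which are not supported by rhel'
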
diff --git a/test/lib/ansible_test/_internal/commands/integration/network.py b/test/lib/ansible_test/_internal/commands/integration/network.py
new file mode 100644
index 0000000..d28416c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/network.py
@@ -0,0 +1,77 @@
+"""Network integration testing."""
+from __future__ import annotations
+
+import os
+
+from ...util import (
+ ApplicationError,
+ ANSIBLE_TEST_CONFIG_ROOT,
+)
+
+from ...util_common import (
+ handle_layout_messages,
+)
+
+from ...target import (
+ walk_network_integration_targets,
+)
+
+from ...config import (
+ NetworkIntegrationConfig,
+)
+
+from . import (
+ command_integration_filter,
+ command_integration_filtered,
+ get_inventory_absolute_path,
+ get_inventory_relative_path,
+ check_inventory,
+ delegate_inventory,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...host_configs import (
+ NetworkInventoryConfig,
+ NetworkRemoteConfig,
+)
+
+
+def command_network_integration(args: NetworkIntegrationConfig) -> None:
+ """Entry point for the `network-integration` command."""
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
+
+ if issubclass(args.target_type, NetworkInventoryConfig):
+ target = args.only_target(NetworkInventoryConfig)
+ inventory_path = get_inventory_absolute_path(args, target)
+
+ if args.delegate or not target.path:
+ target.path = inventory_relative_path
+ else:
+ inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
+
+ if args.no_temp_workdir:
+ # temporary solution to keep DCI tests working
+ inventory_exists = os.path.exists(inventory_path)
+ else:
+ inventory_exists = os.path.isfile(inventory_path)
+
+ if not args.explain and not issubclass(args.target_type, NetworkRemoteConfig) and not inventory_exists:
+ raise ApplicationError(
+ 'Inventory not found: %s\n'
+ 'Use --inventory to specify the inventory path.\n'
+ 'Use --platform to provision resources and generate an inventory file.\n'
+ 'See also inventory template: %s' % (inventory_path, template_path)
+ )
+
+ check_inventory(args, inventory_path)
+ delegate_inventory(args, inventory_path)
+
+ all_targets = tuple(walk_network_integration_targets(include_hidden=True))
+ host_state, internal_targets = command_integration_filter(args, all_targets)
+ command_integration_filtered(args, host_state, internal_targets, all_targets, inventory_path)
diff --git a/test/lib/ansible_test/_internal/commands/integration/posix.py b/test/lib/ansible_test/_internal/commands/integration/posix.py
new file mode 100644
index 0000000..d4c50d3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/posix.py
@@ -0,0 +1,48 @@
+"""POSIX integration testing."""
+from __future__ import annotations
+
+import os
+
+from ...util_common import (
+ handle_layout_messages,
+)
+
+from ...containers import (
+ create_container_hooks,
+ local_ssh,
+ root_ssh,
+)
+
+from ...target import (
+ walk_posix_integration_targets,
+)
+
+from ...config import (
+ PosixIntegrationConfig,
+)
+
+from . import (
+ command_integration_filter,
+ command_integration_filtered,
+ get_inventory_relative_path,
+)
+
+from ...data import (
+ data_context,
+)
+
+
+def command_posix_integration(args: PosixIntegrationConfig) -> None:
+ """Entry point for the `integration` command."""
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
+
+ all_targets = tuple(walk_posix_integration_targets(include_hidden=True))
+ host_state, internal_targets = command_integration_filter(args, all_targets)
+ control_connections = [local_ssh(args, host_state.controller_profile.python)]
+ managed_connections = [root_ssh(ssh) for ssh in host_state.get_controller_target_connections()]
+ pre_target, post_target = create_container_hooks(args, control_connections, managed_connections)
+
+ command_integration_filtered(args, host_state, internal_targets, all_targets, inventory_path, pre_target=pre_target, post_target=post_target)
diff --git a/test/lib/ansible_test/_internal/commands/integration/windows.py b/test/lib/ansible_test/_internal/commands/integration/windows.py
new file mode 100644
index 0000000..aa201c4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/integration/windows.py
@@ -0,0 +1,81 @@
+"""Windows integration testing."""
+from __future__ import annotations
+
+import os
+
+from ...util import (
+ ApplicationError,
+ ANSIBLE_TEST_CONFIG_ROOT,
+)
+
+from ...util_common import (
+ handle_layout_messages,
+)
+
+from ...containers import (
+ create_container_hooks,
+ local_ssh,
+ root_ssh,
+)
+
+from ...target import (
+ walk_windows_integration_targets,
+)
+
+from ...config import (
+ WindowsIntegrationConfig,
+)
+
+from ...host_configs import (
+ WindowsInventoryConfig,
+ WindowsRemoteConfig,
+)
+
+from . import (
+ command_integration_filter,
+ command_integration_filtered,
+ get_inventory_absolute_path,
+ get_inventory_relative_path,
+ check_inventory,
+ delegate_inventory,
+)
+
+from ...data import (
+ data_context,
+)
+
+
+def command_windows_integration(args: WindowsIntegrationConfig) -> None:
+ """Entry point for the `windows-integration` command."""
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
+
+ if issubclass(args.target_type, WindowsInventoryConfig):
+ target = args.only_target(WindowsInventoryConfig)
+ inventory_path = get_inventory_absolute_path(args, target)
+
+ if args.delegate or not target.path:
+ target.path = inventory_relative_path
+ else:
+ inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
+
+ if not args.explain and not issubclass(args.target_type, WindowsRemoteConfig) and not os.path.isfile(inventory_path):
+ raise ApplicationError(
+ 'Inventory not found: %s\n'
+ 'Use --inventory to specify the inventory path.\n'
+ 'Use --windows to provision resources and generate an inventory file.\n'
+ 'See also inventory template: %s' % (inventory_path, template_path)
+ )
+
+ check_inventory(args, inventory_path)
+ delegate_inventory(args, inventory_path)
+
+ all_targets = tuple(walk_windows_integration_targets(include_hidden=True))
+ host_state, internal_targets = command_integration_filter(args, all_targets)
+ control_connections = [local_ssh(args, host_state.controller_profile.python)]
+ managed_connections = [root_ssh(ssh) for ssh in host_state.get_controller_target_connections()]
+ pre_target, post_target = create_container_hooks(args, control_connections, managed_connections)
+
+ command_integration_filtered(args, host_state, internal_targets, all_targets, inventory_path, pre_target=pre_target, post_target=post_target)
diff --git a/test/lib/ansible_test/_internal/commands/sanity/__init__.py b/test/lib/ansible_test/_internal/commands/sanity/__init__.py
new file mode 100644
index 0000000..00b3031
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/__init__.py
@@ -0,0 +1,1173 @@
+"""Execute Ansible sanity tests."""
+from __future__ import annotations
+
+import abc
+import glob
+import hashlib
+import json
+import os
+import pathlib
+import re
+import collections
+import collections.abc as c
+import typing as t
+
+from ...constants import (
+ CONTROLLER_PYTHON_VERSIONS,
+ REMOTE_ONLY_PYTHON_VERSIONS,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from ...encoding import (
+ to_bytes,
+)
+
+from ...io import (
+ read_json_file,
+ write_json_file,
+ write_text_file,
+)
+
+from ...util import (
+ ApplicationError,
+ SubprocessError,
+ display,
+ import_plugins,
+ load_plugins,
+ parse_to_list_of_dict,
+ ANSIBLE_TEST_CONTROLLER_ROOT,
+ ANSIBLE_TEST_TARGET_ROOT,
+ is_binary_file,
+ read_lines_without_comments,
+ is_subdir,
+ paths_to_dirs,
+ get_ansible_version,
+ str_to_version,
+ cache,
+ remove_tree,
+)
+
+from ...util_common import (
+ intercept_python,
+ handle_layout_messages,
+ yamlcheck,
+ create_result_directories,
+)
+
+from ...ansible_util import (
+ ansible_environment,
+)
+
+from ...target import (
+ walk_internal_targets,
+ walk_sanity_targets,
+ TestTarget,
+)
+
+from ...executor import (
+ get_changes_filter,
+ AllTargetsSkipped,
+ Delegate,
+)
+
+from ...python_requirements import (
+ PipInstall,
+ collect_requirements,
+ run_pip,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...test import (
+ TestSuccess,
+ TestFailure,
+ TestSkipped,
+ TestMessage,
+ TestResult,
+ calculate_best_confidence,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...content_config import (
+ get_content_config,
+)
+
+from ...host_configs import (
+ DockerConfig,
+ PosixConfig,
+ PythonConfig,
+ VirtualPythonConfig,
+)
+
+from ...host_profiles import (
+ PosixProfile,
+)
+
+from ...provisioning import (
+ prepare_profiles,
+)
+
+from ...pypi_proxy import (
+ configure_pypi_proxy,
+)
+
+from ...venv import (
+ create_virtual_environment,
+)
+
+COMMAND = 'sanity'
+SANITY_ROOT = os.path.join(ANSIBLE_TEST_CONTROLLER_ROOT, 'sanity')
+TARGET_SANITY_ROOT = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'sanity')
+
+# NOTE: must match ansible.constants.DOCUMENTABLE_PLUGINS, but with 'module' replaced by 'modules'!
+DOCUMENTABLE_PLUGINS = (
+ 'become', 'cache', 'callback', 'cliconf', 'connection', 'httpapi', 'inventory', 'lookup', 'netconf', 'modules', 'shell', 'strategy', 'vars'
+)
+
+created_venvs: list[str] = []
+
+
+def command_sanity(args: SanityConfig) -> None:
+ """Run sanity tests."""
+ create_result_directories(args)
+
+ target_configs = t.cast(list[PosixConfig], args.targets)
+ target_versions: dict[str, PosixConfig] = {target.python.version: target for target in target_configs}
+
+ handle_layout_messages(data_context().content.sanity_messages)
+
+ changes = get_changes_filter(args)
+ require = args.require + changes
+ targets = SanityTargets.create(args.include, args.exclude, require)
+
+ if not targets.include:
+ raise AllTargetsSkipped()
+
+ tests = list(sanity_get_tests())
+
+ if args.test:
+ disabled = []
+ tests = [target for target in tests if target.name in args.test]
+ else:
+ disabled = [target.name for target in tests if not target.enabled and not args.allow_disabled]
+ tests = [target for target in tests if target.enabled or args.allow_disabled]
+
+ if args.skip_test:
+ tests = [target for target in tests if target.name not in args.skip_test]
+
+ targets_use_pypi = any(isinstance(test, SanityMultipleVersion) and test.needs_pypi for test in tests) and not args.list_tests
+ host_state = prepare_profiles(args, targets_use_pypi=targets_use_pypi) # sanity
+
+ get_content_config(args) # make sure content config has been parsed prior to delegation
+
+ if args.delegate:
+ raise Delegate(host_state=host_state, require=changes, exclude=args.exclude)
+
+ configure_pypi_proxy(args, host_state.controller_profile) # sanity
+
+ if disabled:
+ display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled)))
+
+ target_profiles: dict[str, PosixProfile] = {profile.config.python.version: profile for profile in host_state.targets(PosixProfile)}
+
+ total = 0
+ failed = []
+
+ result: t.Optional[TestResult]
+
+ for test in tests:
+ if args.list_tests:
+ print(test.name) # display goes to stderr, this should be on stdout
+ continue
+
+ for version in SUPPORTED_PYTHON_VERSIONS:
+ options = ''
+
+ if isinstance(test, SanityMultipleVersion):
+ if version not in target_versions and version not in args.host_settings.skipped_python_versions:
+ continue # version was not requested, skip it silently
+ else:
+ if version != args.controller_python.version:
+ continue # only multi-version sanity tests use target versions, the rest use the controller version
+
+ if test.supported_python_versions and version not in test.supported_python_versions:
+ result = SanitySkipped(test.name, version)
+ result.reason = f'Skipping sanity test "{test.name}" on Python {version} because it is unsupported.' \
+ f' Supported Python versions: {", ".join(test.supported_python_versions)}'
+ else:
+ if isinstance(test, SanityCodeSmellTest):
+ settings = test.load_processor(args)
+ elif isinstance(test, SanityMultipleVersion):
+ settings = test.load_processor(args, version)
+ elif isinstance(test, SanitySingleVersion):
+ settings = test.load_processor(args)
+ elif isinstance(test, SanityVersionNeutral):
+ settings = test.load_processor(args)
+ else:
+ raise Exception('Unsupported test type: %s' % type(test))
+
+ all_targets = list(targets.targets)
+
+ if test.all_targets:
+ usable_targets = list(targets.targets)
+ elif test.no_targets:
+ usable_targets = []
+ else:
+ usable_targets = list(targets.include)
+
+ all_targets = SanityTargets.filter_and_inject_targets(test, all_targets)
+ usable_targets = SanityTargets.filter_and_inject_targets(test, usable_targets)
+
+ usable_targets = sorted(test.filter_targets_by_version(args, list(usable_targets), version))
+ usable_targets = settings.filter_skipped_targets(usable_targets)
+ sanity_targets = SanityTargets(tuple(all_targets), tuple(usable_targets))
+
+ test_needed = bool(usable_targets or test.no_targets or args.prime_venvs)
+ result = None
+
+ if test_needed and version in args.host_settings.skipped_python_versions:
+ # Deferred checking of Python availability. Done here since it is now known to be required for running the test.
+ # Earlier checking could cause a spurious warning to be generated for a collection which does not support the Python version.
+ # If the user specified a Python version, an error will be generated before reaching this point when the Python interpreter is not found.
+ result = SanitySkipped(test.name, version)
+ result.reason = f'Skipping sanity test "{test.name}" on Python {version} because it could not be found.'
+
+ if not result:
+ if isinstance(test, SanityMultipleVersion):
+ display.info(f'Running sanity test "{test.name}" on Python {version}')
+ else:
+ display.info(f'Running sanity test "{test.name}"')
+
+ if test_needed and not result:
+ if isinstance(test, SanityMultipleVersion):
+ # multi-version sanity tests handle their own requirements (if any) and use the target python
+ test_profile = target_profiles[version]
+ result = test.test(args, sanity_targets, test_profile.python)
+ options = ' --python %s' % version
+ elif isinstance(test, SanitySingleVersion):
+ # single version sanity tests use the controller python
+ test_profile = host_state.controller_profile
+ virtualenv_python = create_sanity_virtualenv(args, test_profile.python, test.name)
+
+ if virtualenv_python:
+ virtualenv_yaml = check_sanity_virtualenv_yaml(virtualenv_python)
+
+ if test.require_libyaml and not virtualenv_yaml:
+ result = SanitySkipped(test.name)
+ result.reason = f'Skipping sanity test "{test.name}" on Python {version} due to missing libyaml support in PyYAML.'
+ else:
+ if virtualenv_yaml is False:
+ display.warning(f'Sanity test "{test.name}" on Python {version} may be slow due to missing libyaml support in PyYAML.')
+
+ if args.prime_venvs:
+ result = SanitySkipped(test.name)
+ else:
+ result = test.test(args, sanity_targets, virtualenv_python)
+ else:
+ result = SanitySkipped(test.name, version)
+ result.reason = f'Skipping sanity test "{test.name}" on Python {version} due to missing virtual environment support.'
+ elif isinstance(test, SanityVersionNeutral):
+ if args.prime_venvs:
+ result = SanitySkipped(test.name)
+ else:
+ # version neutral sanity tests handle their own requirements (if any)
+ result = test.test(args, sanity_targets)
+ else:
+ raise Exception('Unsupported test type: %s' % type(test))
+ elif result:
+ pass
+ else:
+ result = SanitySkipped(test.name, version)
+
+ result.write(args)
+
+ total += 1
+
+ if isinstance(result, SanityFailure):
+ failed.append(result.test + options)
+
+ controller = args.controller
+
+ if created_venvs and isinstance(controller, DockerConfig) and controller.name == 'default' and not args.prime_venvs:
+ names = ', '.join(created_venvs)
+ display.warning(f'The following sanity test virtual environments are out-of-date in the "default" container: {names}')
+
+ if failed:
+ message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
+ len(failed), total, '\n'.join(failed))
+
+ if args.failure_ok:
+ display.error(message)
+ else:
+ raise ApplicationError(message)
+
+
+@cache
+def collect_code_smell_tests() -> tuple[SanityTest, ...]:
+ """Return a tuple of available code smell sanity tests."""
+ paths = glob.glob(os.path.join(SANITY_ROOT, 'code-smell', '*.py'))
+
+ if data_context().content.is_ansible:
+ # include Ansible specific code-smell tests which are not configured to be skipped
+ ansible_code_smell_root = os.path.join(data_context().content.root, 'test', 'sanity', 'code-smell')
+ skip_tests = read_lines_without_comments(os.path.join(ansible_code_smell_root, 'skip.txt'), remove_blank_lines=True, optional=True)
+ paths.extend(path for path in glob.glob(os.path.join(ansible_code_smell_root, '*.py')) if os.path.basename(path) not in skip_tests)
+
+ tests = tuple(SanityCodeSmellTest(p) for p in paths)
+
+ return tests
+
+
+class SanityIgnoreParser:
+ """Parser for the consolidated sanity test ignore file."""
+ NO_CODE = '_'
+
+ def __init__(self, args: SanityConfig) -> None:
+ if data_context().content.collection:
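+ # collections use an ignore file tied to the ansible-core major.minor version (e.g. 'ignore-2.14.txt')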
+ ansible_version = '%s.%s' % tuple(get_ansible_version().split('.')[:2])
+
+ ansible_label = 'Ansible %s' % ansible_version
+ file_name = 'ignore-%s.txt' % ansible_version
+ else:
+ ansible_label = 'Ansible'
+ file_name = 'ignore.txt'
+
+ self.args = args
+ self.relative_path = os.path.join(data_context().content.sanity_path, file_name)
+ self.path = os.path.join(data_context().content.root, self.relative_path)
+ self.ignores: dict[str, dict[str, dict[str, int]]] = collections.defaultdict(lambda: collections.defaultdict(dict))
+ self.skips: dict[str, dict[str, int]] = collections.defaultdict(lambda: collections.defaultdict(int))
+ self.parse_errors: list[tuple[int, int, str]] = []
+ self.file_not_found_errors: list[tuple[int, str]] = []
+
+ lines = read_lines_without_comments(self.path, optional=True)
+ targets = SanityTargets.get_targets()
+ paths = set(target.path for target in targets)
+ tests_by_name: dict[str, SanityTest] = {}
+ versioned_test_names: set[str] = set()
+ unversioned_test_names: dict[str, str] = {}
+ directories = paths_to_dirs(list(paths))
+ paths_by_test: dict[str, set[str]] = {}
+
+ display.info('Read %d sanity test ignore line(s) for %s from: %s' % (len(lines), ansible_label, self.relative_path), verbosity=1)
+
+ for test in sanity_get_tests():
+ test_targets = SanityTargets.filter_and_inject_targets(test, targets)
+
+ if isinstance(test, SanityMultipleVersion):
+ versioned_test_names.add(test.name)
+
+ for python_version in test.supported_python_versions:
+ test_name = '%s-%s' % (test.name, python_version)
+
+ paths_by_test[test_name] = set(target.path for target in test.filter_targets_by_version(args, test_targets, python_version))
+ tests_by_name[test_name] = test
+ else:
+ unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS))
+
+ paths_by_test[test.name] = set(target.path for target in test.filter_targets_by_version(args, test_targets, ''))
+ tests_by_name[test.name] = test
+
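+ # Each ignore entry takes the form: <path> <test_name>[:<error_code>|!<command>]
+ # Versioned tests use '<test_name>-<python_version>' as the test name and 'skip' is the only recognized command.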
+ for line_no, line in enumerate(lines, start=1):
+ if not line:
+ self.parse_errors.append((line_no, 1, "Line cannot be empty or contain only a comment"))
+ continue
+
+ parts = line.split(' ')
+ path = parts[0]
+ codes = parts[1:]
+
+ if not path:
+ self.parse_errors.append((line_no, 1, "Line cannot start with a space"))
+ continue
+
+ if path.endswith(os.path.sep):
+ if path not in directories:
+ self.file_not_found_errors.append((line_no, path))
+ continue
+ else:
+ if path not in paths:
+ self.file_not_found_errors.append((line_no, path))
+ continue
+
+ if not codes:
+ self.parse_errors.append((line_no, len(path), "Error code required after path"))
+ continue
+
+ code = codes[0]
+
+ if not code:
+ self.parse_errors.append((line_no, len(path) + 1, "Error code after path cannot be empty"))
+ continue
+
+ if len(codes) > 1:
+ self.parse_errors.append((line_no, len(path) + len(code) + 2, "Error code cannot contain spaces"))
+ continue
+
+ parts = code.split('!')
+ code = parts[0]
+ commands = parts[1:]
+
+ parts = code.split(':')
+ test_name = parts[0]
+ error_codes = parts[1:]
+
+ test = tests_by_name.get(test_name)
+
+ if not test:
+ unversioned_name = unversioned_test_names.get(test_name)
+
+ if unversioned_name:
+ self.parse_errors.append((line_no, len(path) + len(unversioned_name) + 2, "Sanity test '%s' cannot use a Python version like '%s'" % (
+ unversioned_name, test_name)))
+ elif test_name in versioned_test_names:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires a Python version like '%s-%s'" % (
+ test_name, test_name, args.controller_python.version)))
+ else:
+ self.parse_errors.append((line_no, len(path) + 2, "Sanity test '%s' does not exist" % test_name))
+
+ continue
+
+ if path.endswith(os.path.sep) and not test.include_directories:
+ self.parse_errors.append((line_no, 1, "Sanity test '%s' does not support directory paths" % test_name))
+ continue
+
+ if path not in paths_by_test[test_name] and not test.no_targets:
+ self.parse_errors.append((line_no, 1, "Sanity test '%s' does not test path '%s'" % (test_name, path)))
+ continue
+
+ if commands and error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Error code cannot contain both '!' and ':' characters"))
+ continue
+
+ if commands:
+ command = commands[0]
+
+ if len(commands) > 1:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + len(command) + 3, "Error code cannot contain multiple '!' characters"))
+ continue
+
+ if command == 'skip':
+ if not test.can_skip:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' cannot be skipped" % test_name))
+ continue
+
+ existing_line_no = self.skips.get(test_name, {}).get(path)
+
+ if existing_line_no:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' skip for path '%s' first found on line %d" % (test_name, path, existing_line_no)))
+ continue
+
+ self.skips[test_name][path] = line_no
+ continue
+
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Command '!%s' not recognized" % command))
+ continue
+
+ if not test.can_ignore:
+ self.parse_errors.append((line_no, len(path) + 1, "Sanity test '%s' cannot be ignored" % test_name))
+ continue
+
+ if test.error_code:
+ if not error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires an error code" % test_name))
+ continue
+
+ error_code = error_codes[0]
+
+ if len(error_codes) > 1:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + len(error_code) + 3, "Error code cannot contain multiple ':' characters"))
+ continue
+
+ if error_code in test.optional_error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 3, "Optional error code '%s' cannot be ignored" % (
+ error_code)))
+ continue
+ else:
+ if error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' does not support error codes" % test_name))
+ continue
+
+ error_code = self.NO_CODE
+
+ existing = self.ignores.get(test_name, {}).get(path, {}).get(error_code)
+
+ if existing:
+ if test.error_code:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for error code '%s' for path '%s' first found on line %d" % (
+ test_name, error_code, path, existing)))
+ else:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for path '%s' first found on line %d" % (
+ test_name, path, existing)))
+
+ continue
+
+ self.ignores[test_name][path][error_code] = line_no
+
+ @staticmethod
+ def load(args: SanityConfig) -> SanityIgnoreParser:
+ """Return the current SanityIgnore instance, initializing it if needed."""
+ try:
+ return SanityIgnoreParser.instance # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+
+ instance = SanityIgnoreParser(args)
+
+ SanityIgnoreParser.instance = instance # type: ignore[attr-defined]
+
+ return instance
+
+
+class SanityIgnoreProcessor:
+ """Processor for sanity test ignores for a single run of one sanity test."""
+ def __init__(self,
+ args: SanityConfig,
+ test: SanityTest,
+ python_version: t.Optional[str],
+ ) -> None:
+ name = test.name
+ code = test.error_code
+
+ if python_version:
+ full_name = '%s-%s' % (name, python_version)
+ else:
+ full_name = name
+
+ self.args = args
+ self.test = test
+ self.code = code
+ self.parser = SanityIgnoreParser.load(args)
+ self.ignore_entries = self.parser.ignores.get(full_name, {})
+ self.skip_entries = self.parser.skips.get(full_name, {})
+ self.used_line_numbers: set[int] = set()
+
+ def filter_skipped_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given targets, with any skipped paths filtered out."""
+ return sorted(target for target in targets if target.path not in self.skip_entries)
+
+ def process_errors(self, errors: list[SanityMessage], paths: list[str]) -> list[SanityMessage]:
+ """Return the given errors filtered for ignores and with any settings related errors included."""
+ errors = self.filter_messages(errors)
+ errors.extend(self.get_errors(paths))
+
+ errors = sorted(set(errors))
+
+ return errors
+
+ def filter_messages(self, messages: list[SanityMessage]) -> list[SanityMessage]:
+ """Return a filtered list of the given messages using the entries that have been loaded."""
+ filtered = []
+
+ for message in messages:
+ if message.code in self.test.optional_error_codes and not self.args.enable_optional_errors:
+ continue
+
+ path_entry = self.ignore_entries.get(message.path)
+
+ if path_entry:
+ code = message.code if self.code else SanityIgnoreParser.NO_CODE
+ line_no = path_entry.get(code)
+
+ if line_no:
+ self.used_line_numbers.add(line_no)
+ continue
+
+ filtered.append(message)
+
+ return filtered
+
+ def get_errors(self, paths: list[str]) -> list[SanityMessage]:
+ """Return error messages related to issues with the file."""
+ messages: list[SanityMessage] = []
+
+ # unused errors
+
+ unused: list[tuple[int, str, str]] = []
+
+ if self.test.no_targets or self.test.all_targets:
+ # tests which do not accept a target list, or which use all targets, always return all possible errors, so all ignores can be checked
+ targets = SanityTargets.get_targets()
+ test_targets = SanityTargets.filter_and_inject_targets(self.test, targets)
+ paths = [target.path for target in test_targets]
+
+ for path in paths:
+ path_entry = self.ignore_entries.get(path)
+
+ if not path_entry:
+ continue
+
+ unused.extend((line_no, path, code) for code, line_no in path_entry.items() if line_no not in self.used_line_numbers)
+
+ messages.extend(SanityMessage(
+ code=self.code,
+ message="Ignoring '%s' on '%s' is unnecessary" % (code, path) if self.code else "Ignoring '%s' is unnecessary" % path,
+ path=self.parser.relative_path,
+ line=line,
+ column=1,
+ confidence=calculate_best_confidence(((self.parser.path, line), (path, 0)), self.args.metadata) if self.args.metadata.changes else None,
+ ) for line, path, code in unused)
+
+ return messages
+
+
+class SanitySuccess(TestSuccess):
+ """Sanity test success."""
+ def __init__(self, test: str, python_version: t.Optional[str] = None) -> None:
+ super().__init__(COMMAND, test, python_version)
+
+
+class SanitySkipped(TestSkipped):
+ """Sanity test skipped."""
+ def __init__(self, test: str, python_version: t.Optional[str] = None) -> None:
+ super().__init__(COMMAND, test, python_version)
+
+
+class SanityFailure(TestFailure):
+ """Sanity test failure."""
+ def __init__(
+ self,
+ test: str,
+ python_version: t.Optional[str] = None,
+ messages: t.Optional[c.Sequence[SanityMessage]] = None,
+ summary: t.Optional[str] = None,
+ ) -> None:
+ super().__init__(COMMAND, test, python_version, messages, summary)
+
+
+class SanityMessage(TestMessage):
+ """Single sanity test message for one file."""
+
+
+class SanityTargets:
+ """Sanity test target information."""
+ def __init__(self, targets: tuple[TestTarget, ...], include: tuple[TestTarget, ...]) -> None:
+ self.targets = targets
+ self.include = include
+
+ @staticmethod
+ def create(include: list[str], exclude: list[str], require: list[str]) -> SanityTargets:
+ """Create a SanityTargets instance from the given include, exclude and require lists."""
+ _targets = SanityTargets.get_targets()
+ _include = walk_internal_targets(_targets, include, exclude, require)
+ return SanityTargets(_targets, _include)
+
+ @staticmethod
+ def filter_and_inject_targets(test: SanityTest, targets: c.Iterable[TestTarget]) -> list[TestTarget]:
+ """Filter and inject targets based on test requirements and the given target list."""
+ test_targets = list(targets)
+
+ if not test.include_symlinks:
+ # remove all symlinks unless supported by the test
+ test_targets = [target for target in test_targets if not target.symlink]
+
+ if not test.include_directories or not test.include_symlinks:
+ # exclude symlinked directories unless supported by the test
+ test_targets = [target for target in test_targets if not target.path.endswith(os.path.sep)]
+
+ if test.include_directories:
+ # include directories containing any of the included files
+ test_targets += tuple(TestTarget(path, None, None, '') for path in paths_to_dirs([target.path for target in test_targets]))
+
+ if not test.include_symlinks:
+ # remove all directory symlinks unless supported by the test
+ test_targets = [target for target in test_targets if not target.symlink]
+
+ return test_targets
+
+ @staticmethod
+ def get_targets() -> tuple[TestTarget, ...]:
+ """Return a tuple of sanity test targets. Uses a cached version when available."""
+ try:
+ return SanityTargets.get_targets.targets # type: ignore[attr-defined]
+ except AttributeError:
+ targets = tuple(sorted(walk_sanity_targets()))
+
+ SanityTargets.get_targets.targets = targets # type: ignore[attr-defined]
+
+ return targets
+
+
+class SanityTest(metaclass=abc.ABCMeta):
+ """Sanity test base class."""
+ ansible_only = False
+
+ def __init__(self, name: t.Optional[str] = None) -> None:
+ if not name:
+ name = self.__class__.__name__
+ name = re.sub(r'Test$', '', name) # drop Test suffix
+ name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', name).lower() # use dashes instead of capitalization
+
+ self.name = name
+ self.enabled = True
+
+ # Optional error codes represent errors which spontaneously occur without changes to the content under test, such as those based on the current date.
+ # Because these errors can be unpredictable they behave differently than normal error codes:
+ # * They are not reported by default. The `--enable-optional-errors` option must be used to display these errors.
+ # * They cannot be ignored. This is done to maintain the integrity of the ignore system.
+ self.optional_error_codes: set[str] = set()
+
+ @property
+ def error_code(self) -> t.Optional[str]:
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return None
+
+ @property
+ def can_ignore(self) -> bool:
+ """True if the test supports ignore entries."""
+ return True
+
+ @property
+ def can_skip(self) -> bool:
+ """True if the test supports skip entries."""
+ return not self.all_targets and not self.no_targets
+
+ @property
+ def all_targets(self) -> bool:
+ """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
+ return False
+
+ @property
+ def no_targets(self) -> bool:
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return False
+
+ @property
+ def include_directories(self) -> bool:
+ """True if the test targets should include directories."""
+ return False
+
+ @property
+ def include_symlinks(self) -> bool:
+ """True if the test targets should include symlinks."""
+ return False
+
+ @property
+ def py2_compat(self) -> bool:
+ """True if the test only applies to code that runs on Python 2.x."""
+ return False
+
+ @property
+ def supported_python_versions(self) -> t.Optional[tuple[str, ...]]:
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return CONTROLLER_PYTHON_VERSIONS
+
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]: # pylint: disable=unused-argument
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ if self.no_targets:
+ return []
+
+ raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' % self.name)
+
+ def filter_targets_by_version(self, args: SanityConfig, targets: list[TestTarget], python_version: str) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test, taking into account the Python version."""
+ del python_version # python_version is not used here, but derived classes may make use of it
+
+ targets = self.filter_targets(targets)
+
+ if self.py2_compat:
+ # This sanity test is a Python 2.x compatibility test.
+ content_config = get_content_config(args)
+
+ if content_config.py2_support:
+ # This collection supports Python 2.x.
+ # Filter targets to include only those that require support for remote-only Python versions.
+ targets = self.filter_remote_targets(targets)
+ else:
+ # This collection does not support Python 2.x.
+ # There are no targets to test.
+ targets = []
+
+ return targets
+
+ @staticmethod
+ def filter_remote_targets(targets: list[TestTarget]) -> list[TestTarget]:
+ """Return a filtered list of the given targets, including only those that require support for remote-only Python versions."""
+ targets = [target for target in targets if (
+ is_subdir(target.path, data_context().content.module_path) or
+ is_subdir(target.path, data_context().content.module_utils_path) or
+ is_subdir(target.path, data_context().content.unit_module_path) or
+ is_subdir(target.path, data_context().content.unit_module_utils_path) or
+ # include modules/module_utils within integration test library directories
+ re.search('^%s/.*/library/' % re.escape(data_context().content.integration_targets_path), target.path) or
+ # special handling for content in ansible-core
+ (data_context().content.is_ansible and (
+ # utility code that runs in target environments and requires support for remote-only Python versions
+ is_subdir(target.path, 'test/lib/ansible_test/_util/target/') or
+ # integration test support modules/module_utils continue to require support for remote-only Python versions
+ re.search('^test/support/integration/.*/(modules|module_utils)/', target.path) or
+ # collection loader requires support for remote-only Python versions
+ re.search('^lib/ansible/utils/collection_loader/', target.path)
+ ))
+ )]
+
+ return targets
+
+
+class SanitySingleVersion(SanityTest, metaclass=abc.ABCMeta):
+ """Base class for sanity test plugins which should run on a single python version."""
+ @property
+ def require_libyaml(self) -> bool:
+ """True if the test requires PyYAML to have libyaml support."""
+ return False
+
+ @abc.abstractmethod
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ """Run the sanity test and return the result."""
+
+ def load_processor(self, args: SanityConfig) -> SanityIgnoreProcessor:
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+
+class SanityCodeSmellTest(SanitySingleVersion):
+ """Sanity test script."""
+ def __init__(self, path) -> None:
+ name = os.path.splitext(os.path.basename(path))[0]
+ config_path = os.path.splitext(path)[0] + '.json'
+
+ super().__init__(name=name)
+
+ self.path = path
+ self.config_path = config_path if os.path.exists(config_path) else None
+ self.config = None
+
+ if self.config_path:
+ self.config = read_json_file(self.config_path)
+
+ if self.config:
+ self.enabled = not self.config.get('disabled')
+
+ self.output: t.Optional[str] = self.config.get('output')
+ self.extensions: list[str] = self.config.get('extensions')
+ self.prefixes: list[str] = self.config.get('prefixes')
+ self.files: list[str] = self.config.get('files')
+ self.text: t.Optional[bool] = self.config.get('text')
+ self.ignore_self: bool = self.config.get('ignore_self')
+ self.minimum_python_version: t.Optional[str] = self.config.get('minimum_python_version')
+ self.maximum_python_version: t.Optional[str] = self.config.get('maximum_python_version')
+
+ self.__all_targets: bool = self.config.get('all_targets')
+ self.__no_targets: bool = self.config.get('no_targets')
+ self.__include_directories: bool = self.config.get('include_directories')
+ self.__include_symlinks: bool = self.config.get('include_symlinks')
+ self.__py2_compat: bool = self.config.get('py2_compat', False)
+ else:
+ self.output = None
+ self.extensions = []
+ self.prefixes = []
+ self.files = []
+ self.text = None
+ self.ignore_self = False
+ self.minimum_python_version = None
+ self.maximum_python_version = None
+
+ self.__all_targets = False
+ self.__no_targets = True
+ self.__include_directories = False
+ self.__include_symlinks = False
+ self.__py2_compat = False
+
+ if self.no_targets:
+ mutually_exclusive = (
+ 'extensions',
+ 'prefixes',
+ 'files',
+ 'text',
+ 'ignore_self',
+ 'all_targets',
+ 'include_directories',
+ 'include_symlinks',
+ )
+
+ problems = sorted(name for name in mutually_exclusive if getattr(self, name))
+
+ if problems:
+ raise ApplicationError('Sanity test "%s" option "no_targets" is mutually exclusive with options: %s' % (self.name, ', '.join(problems)))
+
+ @property
+ def all_targets(self) -> bool:
+ """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
+ return self.__all_targets
+
+ @property
+ def no_targets(self) -> bool:
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return self.__no_targets
+
+ @property
+ def include_directories(self) -> bool:
+ """True if the test targets should include directories."""
+ return self.__include_directories
+
+ @property
+ def include_symlinks(self) -> bool:
+ """True if the test targets should include symlinks."""
+ return self.__include_symlinks
+
+ @property
+ def py2_compat(self) -> bool:
+ """True if the test only applies to code that runs on Python 2.x."""
+ return self.__py2_compat
+
+ @property
+ def supported_python_versions(self) -> t.Optional[tuple[str, ...]]:
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ versions = super().supported_python_versions
+
+ if self.minimum_python_version:
+ versions = tuple(version for version in versions if str_to_version(version) >= str_to_version(self.minimum_python_version))
+
+ if self.maximum_python_version:
+ versions = tuple(version for version in versions if str_to_version(version) <= str_to_version(self.maximum_python_version))
+
+ return versions
+
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ if self.no_targets:
+ return []
+
+ if self.text is not None:
+ if self.text:
+ targets = [target for target in targets if not is_binary_file(target.path)]
+ else:
+ targets = [target for target in targets if is_binary_file(target.path)]
+
+ if self.extensions:
+ targets = [target for target in targets if os.path.splitext(target.path)[1] in self.extensions
+ or (is_subdir(target.path, 'bin') and '.py' in self.extensions)]
+
+ if self.prefixes:
+ targets = [target for target in targets if any(target.path.startswith(pre) for pre in self.prefixes)]
+
+ if self.files:
+ targets = [target for target in targets if os.path.basename(target.path) in self.files]
+
+ if self.ignore_self and data_context().content.is_ansible:
+ relative_self_path = os.path.relpath(self.path, data_context().content.root)
+ targets = [target for target in targets if target.path != relative_self_path]
+
+ return targets
+
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ """Run the sanity test and return the result."""
+ cmd = [python.path, self.path]
+
+ env = ansible_environment(args, color=False)
+ env.update(PYTHONUTF8='1') # force all code-smell sanity tests to run with Python UTF-8 Mode enabled
+
+ pattern = None
+ data = None
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if self.config:
+ if self.output == 'path-line-column-message':
+ pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+ elif self.output == 'path-message':
+ pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
+ else:
+ raise ApplicationError('Unsupported output type: %s' % self.output)
+
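+ # pass the list of target paths to the test script on stdin, one per line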
+ if not self.no_targets:
+ data = '\n'.join(paths)
+
+ if data:
+ display.info(data, verbosity=4)
+
+ try:
+ stdout, stderr = intercept_python(args, python, cmd, data=data, env=env, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if stdout and not stderr:
+ if pattern:
+ matches = parse_to_list_of_dict(pattern, stdout)
+
+ messages = [SanityMessage(
+ message=m['message'],
+ path=m['path'],
+ line=int(m.get('line', 0)),
+ column=int(m.get('column', 0)),
+ ) for m in matches]
+
+ messages = settings.process_errors(messages, paths)
+
+ if not messages:
+ return SanitySuccess(self.name)
+
+ return SanityFailure(self.name, messages=messages)
+
+ if stderr or status:
+ summary = '%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ return SanityFailure(self.name, summary=summary)
+
+ messages = settings.process_errors([], paths)
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
+
+ def load_processor(self, args: SanityConfig) -> SanityIgnoreProcessor:
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+
+class SanityVersionNeutral(SanityTest, metaclass=abc.ABCMeta):
+ """Base class for sanity test plugins which are idependent of the python version being used."""
+ @abc.abstractmethod
+ def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
+ """Run the sanity test and return the result."""
+
+ def load_processor(self, args: SanityConfig) -> SanityIgnoreProcessor:
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+ @property
+ def supported_python_versions(self) -> t.Optional[tuple[str, ...]]:
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return None
+
+
+class SanityMultipleVersion(SanityTest, metaclass=abc.ABCMeta):
+ """Base class for sanity test plugins which should run on multiple python versions."""
+ @abc.abstractmethod
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ """Run the sanity test and return the result."""
+
+ def load_processor(self, args: SanityConfig, python_version: str) -> SanityIgnoreProcessor:
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, python_version)
+
+ @property
+ def needs_pypi(self) -> bool:
+ """True if the test requires PyPI, otherwise False."""
+ return False
+
+ @property
+ def supported_python_versions(self) -> t.Optional[tuple[str, ...]]:
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return SUPPORTED_PYTHON_VERSIONS
+
+ def filter_targets_by_version(self, args: SanityConfig, targets: list[TestTarget], python_version: str) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test, taking into account the Python version."""
+ if not python_version:
+ raise Exception('python_version is required to filter multi-version tests')
+
+ targets = super().filter_targets_by_version(args, targets, python_version)
+
+ if python_version in REMOTE_ONLY_PYTHON_VERSIONS:
+ content_config = get_content_config(args)
+
+ if python_version not in content_config.modules.python_versions:
+ # when a remote-only python version is not supported there are no paths to test
+ return []
+
+ # when a remote-only python version is supported, tests must be applied only to targets that support remote-only Python versions
+ targets = self.filter_remote_targets(targets)
+
+ return targets
+
+
+@cache
+def sanity_get_tests() -> tuple[SanityTest, ...]:
+ """Return a tuple of the available sanity tests."""
+ import_plugins('commands/sanity')
+ sanity_plugins: dict[str, t.Type[SanityTest]] = {}
+ load_plugins(SanityTest, sanity_plugins)
+ sanity_plugins.pop('sanity') # remove the SanityCodeSmellTest base class; code smell tests are collected separately by collect_code_smell_tests()
+ sanity_tests = tuple(plugin() for plugin in sanity_plugins.values() if data_context().content.is_ansible or not plugin.ansible_only)
+ sanity_tests = tuple(sorted(sanity_tests + collect_code_smell_tests(), key=lambda k: k.name))
+ return sanity_tests
+
+
+def create_sanity_virtualenv(
+ args: SanityConfig,
+ python: PythonConfig,
+ name: str,
+ coverage: bool = False,
+ minimize: bool = False,
+) -> t.Optional[VirtualPythonConfig]:
+ """Return an existing sanity virtual environment matching the requested parameters or create a new one."""
+ commands = collect_requirements( # create_sanity_virtualenv()
+ python=python,
+ controller=True,
+ virtualenv=False,
+ command=None,
+ ansible=False,
+ cryptography=False,
+ coverage=coverage,
+ minimize=minimize,
+ sanity=name,
+ )
+
+ if commands:
+ label = f'sanity.{name}'
+ else:
+ label = 'sanity' # use a single virtualenv name for tests which have no requirements
+
+ # The path to the virtual environment must be kept short to avoid the 127 character shebang length limit on Linux.
+ # If the limit is exceeded, generated entry point scripts from pip installed packages will fail with syntax errors.
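+ # Hashing the serialized requirements gives each unique requirement set its own cached virtual environment.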
+ virtualenv_install = json.dumps([command.serialize() for command in commands], indent=4)
+ virtualenv_hash = hashlib.sha256(to_bytes(virtualenv_install)).hexdigest()[:8]
+ virtualenv_cache = os.path.expanduser('~/.ansible/test/venv')
+ virtualenv_path = os.path.join(virtualenv_cache, label, f'{python.version}', virtualenv_hash)
+ virtualenv_marker = os.path.join(virtualenv_path, 'marker.txt')
+
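+ # metadata files record what was installed and whether PyYAML in the virtualenv has libyaml support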
+ meta_install = os.path.join(virtualenv_path, 'meta.install.json')
+ meta_yaml = os.path.join(virtualenv_path, 'meta.yaml.json')
+
+ virtualenv_python = VirtualPythonConfig(
+ version=python.version,
+ path=os.path.join(virtualenv_path, 'bin', 'python'),
+ )
+
+ if not os.path.exists(virtualenv_marker):
+ # a virtualenv without a marker is assumed to have been partially created
+ remove_tree(virtualenv_path)
+
+ if not create_virtual_environment(args, python, virtualenv_path):
+ return None
+
+ run_pip(args, virtualenv_python, commands, None) # create_sanity_virtualenv()
+
+ write_text_file(meta_install, virtualenv_install)
+
+ # false positive: pylint: disable=no-member
+ if any(isinstance(command, PipInstall) and command.has_package('pyyaml') for command in commands):
+ virtualenv_yaml = yamlcheck(virtualenv_python)
+ else:
+ virtualenv_yaml = None
+
+ write_json_file(meta_yaml, virtualenv_yaml)
+
+ created_venvs.append(f'{label}-{python.version}')
+
+ # touch the marker to keep track of when the virtualenv was last used
+ pathlib.Path(virtualenv_marker).touch()
+
+ return virtualenv_python
+
+
+def check_sanity_virtualenv_yaml(python: VirtualPythonConfig) -> t.Optional[bool]:
+ """Return True if PyYAML has libyaml support for the given sanity virtual environment, False if it does not and None if it was not found."""
+ virtualenv_path = os.path.dirname(os.path.dirname(python.path))
+ meta_yaml = os.path.join(virtualenv_path, 'meta.yaml.json')
+ virtualenv_yaml = read_json_file(meta_yaml)
+
+ return virtualenv_yaml
diff --git a/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py b/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py
new file mode 100644
index 0000000..6815f88
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/ansible_doc.py
@@ -0,0 +1,127 @@
+"""Sanity test for ansible-doc."""
+from __future__ import annotations
+
+import collections
+import os
+import re
+
+from . import (
+ DOCUMENTABLE_PLUGINS,
+ SanitySingleVersion,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SanityMessage,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...target import (
+ TestTarget,
+)
+
+from ...util import (
+ SubprocessError,
+ display,
+ is_subdir,
+)
+
+from ...ansible_util import (
+ ansible_environment,
+ intercept_python,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...host_configs import (
+ PythonConfig,
+)
+
+
+class AnsibleDocTest(SanitySingleVersion):
+ """Sanity test for ansible-doc."""
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ plugin_paths = [plugin_path for plugin_type, plugin_path in data_context().content.plugin_paths.items() if plugin_type in DOCUMENTABLE_PLUGINS]
+
+ return [target for target in targets
+ if os.path.splitext(target.path)[1] == '.py'
+ and os.path.basename(target.path) != '__init__.py'
+ and any(is_subdir(target.path, path) for path in plugin_paths)
+ ]
+
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ doc_targets: dict[str, list[str]] = collections.defaultdict(list)
+
+ remap_types = dict(
+ modules='module',
+ )
+
+ for plugin_type, plugin_path in data_context().content.plugin_paths.items():
+ plugin_type = remap_types.get(plugin_type, plugin_type)
+
+ for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
+ plugin_parts = os.path.relpath(plugin_file_path, plugin_path).split(os.path.sep)
+ plugin_name = os.path.splitext(plugin_parts[-1])[0]
+
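+ # deprecated plugins have a leading underscore in the file name which is not part of the documented plugin name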
+ if plugin_name.startswith('_'):
+ plugin_name = plugin_name[1:]
+
+ plugin_fqcn = data_context().content.prefix + '.'.join(plugin_parts[:-1] + [plugin_name])
+
+ doc_targets[plugin_type].append(plugin_fqcn)
+
+ env = ansible_environment(args, color=False)
+ error_messages: list[SanityMessage] = []
+
+ for doc_type in sorted(doc_targets):
+ for format_option in [None, '--json']:
+ cmd = ['ansible-doc', '-t', doc_type]
+ if format_option is not None:
+ cmd.append(format_option)
+ cmd.extend(sorted(doc_targets[doc_type]))
+
+ try:
+ stdout, stderr = intercept_python(args, python, cmd, env, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if status:
+ summary = '%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr)
+ return SanityFailure(self.name, summary=summary)
+
+ if stdout:
+ display.info(stdout.strip(), verbosity=3)
+
+ if stderr:
+ # ignore removed module/plugin warnings
+ stderr = re.sub(r'\[WARNING]: [^ ]+ [^ ]+ has been removed\n', '', stderr).strip()
+
+ if stderr:
+ summary = 'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
+ return SanityFailure(self.name, summary=summary)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ error_messages = settings.process_errors(error_messages, paths)
+
+ if error_messages:
+ return SanityFailure(self.name, messages=error_messages)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/commands/sanity/compile.py b/test/lib/ansible_test/_internal/commands/sanity/compile.py
new file mode 100644
index 0000000..4505338
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/compile.py
@@ -0,0 +1,94 @@
+"""Sanity test for proper python syntax."""
+from __future__ import annotations
+
+import os
+
+from . import (
+ SanityMultipleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SanitySkipped,
+ TARGET_SANITY_ROOT,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...target import (
+ TestTarget,
+)
+
+from ...util import (
+ SubprocessError,
+ display,
+ parse_to_list_of_dict,
+ is_subdir,
+)
+
+from ...util_common import (
+ run_command,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...host_configs import (
+ PythonConfig,
+)
+
+
+class CompileTest(SanityMultipleVersion):
+ """Sanity test for proper python syntax."""
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ if args.prime_venvs:
+ return SanitySkipped(self.name, python_version=python.version)
+
+ settings = self.load_processor(args, python.version)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [python.path, os.path.join(TARGET_SANITY_ROOT, 'compile', 'compile.py')]
+
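+ # the target paths are passed to compile.py on stdin, one per line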
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ try:
+ stdout, stderr = run_command(args, cmd, data=data, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name, python_version=python.version)
+
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stdout)
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=r['path'].replace('./', ''),
+ line=int(r['line']),
+ column=int(r['column']),
+ ) for r in results]
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results, python_version=python.version)
+
+ return SanitySuccess(self.name, python_version=python.version)
diff --git a/test/lib/ansible_test/_internal/commands/sanity/ignores.py b/test/lib/ansible_test/_internal/commands/sanity/ignores.py
new file mode 100644
index 0000000..6d9837d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/ignores.py
@@ -0,0 +1,84 @@
+"""Sanity test for the sanity ignore file."""
+from __future__ import annotations
+
+import os
+
+from . import (
+ SanityFailure,
+ SanityIgnoreParser,
+ SanityVersionNeutral,
+ SanitySuccess,
+ SanityMessage,
+ SanityTargets,
+)
+
+from ...test import (
+ calculate_confidence,
+ calculate_best_confidence,
+ TestResult,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+
+class IgnoresTest(SanityVersionNeutral):
+ """Sanity test for sanity test ignore entries."""
+ @property
+ def can_ignore(self) -> bool:
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self) -> bool:
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
+ sanity_ignore = SanityIgnoreParser.load(args)
+
+ messages: list[SanityMessage] = []
+
+ # parse errors
+
+ messages.extend(SanityMessage(
+ message=message,
+ path=sanity_ignore.relative_path,
+ line=line,
+ column=column,
+ confidence=calculate_confidence(sanity_ignore.path, line, args.metadata) if args.metadata.changes else None,
+ ) for line, column, message in sanity_ignore.parse_errors)
+
+ # file not found errors
+
+ messages.extend(SanityMessage(
+ message="%s '%s' does not exist" % ("Directory" if path.endswith(os.path.sep) else "File", path),
+ path=sanity_ignore.relative_path,
+ line=line,
+ column=1,
+ confidence=calculate_best_confidence(((sanity_ignore.path, line), (path, 0)), args.metadata) if args.metadata.changes else None,
+ ) for line, path in sanity_ignore.file_not_found_errors)
+
+ # conflicting ignores and skips
+
+ for test_name, ignores in sanity_ignore.ignores.items():
+ for ignore_path, ignore_entry in ignores.items():
+ skip_line_no = sanity_ignore.skips.get(test_name, {}).get(ignore_path)
+
+ if not skip_line_no:
+ continue
+
+ for ignore_line_no in ignore_entry.values():
+ messages.append(SanityMessage(
+ message="Ignoring '%s' is unnecessary due to skip entry on line %d" % (ignore_path, skip_line_no),
+ path=sanity_ignore.relative_path,
+ line=ignore_line_no,
+ column=1,
+ confidence=calculate_confidence(sanity_ignore.path, ignore_line_no, args.metadata) if args.metadata.changes else None,
+ ))
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/commands/sanity/import.py b/test/lib/ansible_test/_internal/commands/sanity/import.py
new file mode 100644
index 0000000..8511d7a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/import.py
@@ -0,0 +1,217 @@
+"""Sanity test for proper import exception handling."""
+from __future__ import annotations
+
+import collections.abc as c
+import os
+
+from . import (
+ SanityMultipleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ TARGET_SANITY_ROOT,
+ SanityTargets,
+ create_sanity_virtualenv,
+ check_sanity_virtualenv_yaml,
+)
+
+from ...constants import (
+ CONTROLLER_MIN_PYTHON_VERSION,
+ REMOTE_ONLY_PYTHON_VERSIONS,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...target import (
+ TestTarget,
+)
+
+from ...util import (
+ cache,
+ SubprocessError,
+ display,
+ parse_to_list_of_dict,
+ is_subdir,
+ ANSIBLE_TEST_TOOLS_ROOT,
+)
+
+from ...util_common import (
+ ResultType,
+ create_temp_dir,
+)
+
+from ...ansible_util import (
+ ansible_environment,
+)
+
+from ...python_requirements import (
+ PipUnavailableError,
+ install_requirements,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...coverage_util import (
+ cover_python,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...host_configs import (
+ PythonConfig,
+)
+
+from ...venv import (
+ get_virtualenv_version,
+)
+
+
+def _get_module_test(module_restrictions: bool) -> c.Callable[[str], bool]:
+ """Create a predicate which tests whether a path can be used by modules or not."""
+ module_path = data_context().content.module_path
+ module_utils_path = data_context().content.module_utils_path
+ if module_restrictions:
+ return lambda path: is_subdir(path, module_path) or is_subdir(path, module_utils_path)
+ return lambda path: not (is_subdir(path, module_path) or is_subdir(path, module_utils_path))
+
+
+class ImportTest(SanityMultipleVersion):
+ """Sanity test for proper import exception handling."""
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ if data_context().content.is_ansible:
+ # all of ansible-core must pass the import test, not just plugins/modules
+ # modules/module_utils will be tested using the module context
+ # everything else will be tested using the plugin context
+ paths = ['lib/ansible']
+ else:
+ # only plugins/modules must pass the import test for collections
+ paths = list(data_context().content.plugin_paths.values())
+
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' and
+ any(is_subdir(target.path, path) for path in paths)]
+
+ @property
+ def needs_pypi(self) -> bool:
+ """True if the test requires PyPI, otherwise False."""
+ return True
+
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ settings = self.load_processor(args, python.version)
+
+ paths = [target.path for target in targets.include]
+
+ if python.version.startswith('2.') and (get_virtualenv_version(args, python.path) or (0,)) < (13,):
+ # hack to make sure that virtualenv is available under Python 2.x
+ # on Python 3.x we can use the built-in venv
+ # version 13+ is required to use the `--no-wheel` option
+ try:
+ install_requirements(args, python, virtualenv=True, controller=False) # sanity (import)
+ except PipUnavailableError as ex:
+ display.warning(str(ex))
+
+ temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')
+
+ messages = []
+
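+ # modules/module_utils are checked under module import rules; all other code is checked under plugin import rules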
+ for import_type, test in (
+ ('module', _get_module_test(True)),
+ ('plugin', _get_module_test(False)),
+ ):
+ if import_type == 'plugin' and python.version in REMOTE_ONLY_PYTHON_VERSIONS:
+ continue
+
+ data = '\n'.join([path for path in paths if test(path)])
+
+ if not data and not args.prime_venvs:
+ continue
+
+ virtualenv_python = create_sanity_virtualenv(args, python, f'{self.name}.{import_type}', coverage=args.coverage, minimize=True)
+
+ if not virtualenv_python:
+ display.warning(f'Skipping sanity test "{self.name}" on Python {python.version} due to missing virtual environment support.')
+ return SanitySkipped(self.name, python.version)
+
+ virtualenv_yaml = check_sanity_virtualenv_yaml(virtualenv_python)
+
+ if virtualenv_yaml is False:
+ display.warning(f'Sanity test "{self.name}" ({import_type}) on Python {python.version} may be slow due to missing libyaml support in PyYAML.')
+
+ env = ansible_environment(args, color=False)
+
+ env.update(
+ SANITY_TEMP_PATH=ResultType.TMP.path,
+ SANITY_IMPORTER_TYPE=import_type,
+ )
+
+ if data_context().content.collection:
+ external_python = create_sanity_virtualenv(args, args.controller_python, self.name)
+
+ env.update(
+ SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name,
+ SANITY_EXTERNAL_PYTHON=external_python.path,
+ SANITY_YAML_TO_JSON=os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'yaml_to_json.py'),
+ ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION=CONTROLLER_MIN_PYTHON_VERSION,
+ PYTHONPATH=':'.join((get_ansible_test_python_path(), env["PYTHONPATH"])),
+ )
+
+ if args.prime_venvs:
+ continue
+
+ display.info(import_type + ': ' + data, verbosity=4)
+
+ cmd = ['importer.py']
+
+ # add the importer to the path so it can be accessed through the coverage injector
+ env.update(
+ PATH=os.pathsep.join([os.path.join(TARGET_SANITY_ROOT, 'import'), env['PATH']]),
+ )
+
+ try:
+ stdout, stderr = cover_python(args, virtualenv_python, cmd, self.name, env, capture=True, data=data)
+
+ if stdout or stderr:
+ raise SubprocessError(cmd, stdout=stdout, stderr=stderr)
+ except SubprocessError as ex:
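+ # status 10 with output on stdout and nothing on stderr means the importer reported findings; anything else is re-raised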
+ if ex.status != 10 or ex.stderr or not ex.stdout:
+ raise
+
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+
+ parsed = parse_to_list_of_dict(pattern, ex.stdout)
+
+ relative_temp_root = os.path.relpath(temp_root, data_context().content.root) + os.path.sep
+
+ messages += [SanityMessage(
+ message=r['message'],
+ path=os.path.relpath(r['path'], relative_temp_root) if r['path'].startswith(relative_temp_root) else r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ ) for r in parsed]
+
+ if args.prime_venvs:
+ return SanitySkipped(self.name, python_version=python.version)
+
+ results = settings.process_errors(messages, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results, python_version=python.version)
+
+ return SanitySuccess(self.name, python_version=python.version)
+
+
+@cache
+def get_ansible_test_python_path() -> str:
+ """
+ Return a directory usable for PYTHONPATH, containing only the ansible-test collection loader.
+ The temporary directory created will be cached for the lifetime of the process and cleaned up at exit.
+ """
+ python_path = create_temp_dir(prefix='ansible-test-')
+ return python_path
diff --git a/test/lib/ansible_test/_internal/commands/sanity/mypy.py b/test/lib/ansible_test/_internal/commands/sanity/mypy.py
new file mode 100644
index 0000000..cb8ed12
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/mypy.py
@@ -0,0 +1,259 @@
+"""Sanity test which executes mypy."""
+from __future__ import annotations
+
+import dataclasses
+import os
+import re
+import typing as t
+
+from . import (
+ SanityMultipleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SanityTargets,
+ create_sanity_virtualenv,
+)
+
+from ...constants import (
+ CONTROLLER_PYTHON_VERSIONS,
+ REMOTE_ONLY_PYTHON_VERSIONS,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...target import (
+ TestTarget,
+)
+
+from ...util import (
+ SubprocessError,
+ display,
+ parse_to_list_of_dict,
+ ANSIBLE_TEST_CONTROLLER_ROOT,
+ ApplicationError,
+ is_subdir,
+)
+
+from ...util_common import (
+ intercept_python,
+)
+
+from ...ansible_util import (
+ ansible_environment,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...host_configs import (
+ PythonConfig,
+ VirtualPythonConfig,
+)
+
+
+class MypyTest(SanityMultipleVersion):
+ """Sanity test which executes mypy."""
+ ansible_only = True
+
+ vendored_paths = (
+ 'lib/ansible/module_utils/six/__init__.py',
+ 'lib/ansible/module_utils/distro/_distro.py',
+ 'lib/ansible/module_utils/compat/_selectors2.py',
+ )
+
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' and target.path not in self.vendored_paths and (
+ target.path.startswith('lib/ansible/') or target.path.startswith('test/lib/ansible_test/_internal/')
+ or target.path.startswith('test/lib/ansible_test/_util/target/sanity/import/'))]
+
+ @property
+ def error_code(self) -> t.Optional[str]:
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'ansible-test'
+
+ @property
+ def needs_pypi(self) -> bool:
+ """True if the test requires PyPI, otherwise False."""
+ return True
+
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ settings = self.load_processor(args, python.version)
+
+ paths = [target.path for target in targets.include]
+
+ virtualenv_python = create_sanity_virtualenv(args, args.controller_python, self.name)
+
+ if args.prime_venvs:
+ return SanitySkipped(self.name, python_version=python.version)
+
+ if not virtualenv_python:
+ display.warning(f'Skipping sanity test "{self.name}" due to missing virtual environment support on Python {args.controller_python.version}.')
+ return SanitySkipped(self.name, python.version)
+
+ controller_python_versions = CONTROLLER_PYTHON_VERSIONS
+ remote_only_python_versions = REMOTE_ONLY_PYTHON_VERSIONS
+
+ contexts = (
+ MyPyContext('ansible-test', ['test/lib/ansible_test/_util/target/sanity/import/'], controller_python_versions),
+ MyPyContext('ansible-test', ['test/lib/ansible_test/_internal/'], controller_python_versions),
+ MyPyContext('ansible-core', ['lib/ansible/'], controller_python_versions),
+ MyPyContext('modules', ['lib/ansible/modules/', 'lib/ansible/module_utils/'], remote_only_python_versions),
+ )
+
+ unfiltered_messages: list[SanityMessage] = []
+
+ for context in contexts:
+ if python.version not in context.python_versions:
+ continue
+
+ unfiltered_messages.extend(self.test_context(args, virtualenv_python, python, context, paths))
+
+ notices = []
+ messages = []
+
+ for message in unfiltered_messages:
+ if message.level != 'error':
+ notices.append(message)
+ continue
+
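+ # mypy appends the error code to the message, separated by two spaces and wrapped in brackets,
+ # e.g. (illustrative): Incompatible return value type  [return-value]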
+ match = re.search(r'^(?P<message>.*) {2}\[(?P<code>.*)]$', message.message)
+
+ messages.append(SanityMessage(
+ message=match.group('message'),
+ path=message.path,
+ line=message.line,
+ column=message.column,
+ level=message.level,
+ code=match.group('code'),
+ ))
+
+ for notice in notices:
+ display.info(notice.format(), verbosity=3)
+
+ # The following error codes from mypy indicate that results are incomplete.
+ # That prevents the test from completing successfully, just as if mypy were to traceback or generate unexpected output.
+ fatal_error_codes = {
+ 'import',
+ 'syntax',
+ }
+
+ fatal_errors = [message for message in messages if message.code in fatal_error_codes]
+
+ if fatal_errors:
+ error_message = '\n'.join(error.format() for error in fatal_errors)
+ raise ApplicationError(f'Encountered {len(fatal_errors)} fatal errors reported by mypy:\n{error_message}')
+
+ paths_set = set(paths)
+
+ # Only report messages for paths that were specified as targets.
+ # Imports in our code are followed by mypy in order to perform its analysis, which is important for accurate results.
+ # However, it will also report issues on those files, which is not the desired behavior.
+ messages = [message for message in messages if message.path in paths_set]
+
+ results = settings.process_errors(messages, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results, python_version=python.version)
+
+ return SanitySuccess(self.name, python_version=python.version)
+
+ @staticmethod
+ def test_context(
+ args: SanityConfig,
+ virtualenv_python: VirtualPythonConfig,
+ python: PythonConfig,
+ context: MyPyContext,
+ paths: list[str],
+ ) -> list[SanityMessage]:
+ """Run mypy tests for the specified context."""
+ context_paths = [path for path in paths if any(is_subdir(path, match_path) for match_path in context.paths)]
+
+ if not context_paths:
+ return []
+
+ config_path = os.path.join(ANSIBLE_TEST_CONTROLLER_ROOT, 'sanity', 'mypy', f'{context.name}.ini')
+
+ display.info(f'Checking context "{context.name}"', verbosity=1)
+
+ env = ansible_environment(args, color=False)
+ env['MYPYPATH'] = env['PYTHONPATH']
+
+ # The --no-site-packages option should not be used, as it will prevent loading of type stubs from the sanity test virtual environment.
+
+ # Enabling the --warn-unused-configs option would help keep the config files clean.
+ # However, the option can only be used when all files in tested contexts are evaluated.
+ # Unfortunately sanity tests have no way of making that determination currently.
+ # The option is also incompatible with incremental mode and caching.
+
+ cmd = [
+ # Below are arguments common to all contexts.
+ # They are kept here to avoid repetition in each config file.
+ virtualenv_python.path,
+ '-m', 'mypy',
+ '--show-column-numbers',
+ '--show-error-codes',
+ '--no-error-summary',
+ # This is a fairly common pattern in our code, so we'll allow it.
+ '--allow-redefinition',
+ # Since we specify the path(s) to test, it's important that mypy is configured to use the default behavior of following imports.
+ '--follow-imports', 'normal',
+ # Incremental results and caching do not provide significant performance benefits.
+ # They also prevent the use of the --warn-unused-configs option.
+ '--no-incremental',
+ '--cache-dir', '/dev/null',
+ # The platform is specified here so that results are consistent regardless of what platform the tests are run from.
+ # In the future, if testing of other platforms is desired, the platform should become part of the test specification, just like the Python version.
+ '--platform', 'linux',
+ # Despite what the documentation [1] states, the --python-version option does not cause mypy to search for a corresponding Python executable.
+ # It will instead use the Python executable that is used to run mypy itself.
+ # The --python-executable option can be used to specify the Python executable, with the default being the executable used to run mypy.
+ # As a precaution, that option is used in case the behavior of mypy is updated in the future to match the documentation.
+ # That should help guarantee that the Python executable providing type hints is the one used to run mypy.
+ # [1] https://mypy.readthedocs.io/en/stable/command_line.html#cmdoption-mypy-python-version
+ '--python-executable', virtualenv_python.path,
+ '--python-version', python.version,
+ # Below are context specific arguments.
+ # They are primarily useful for listing individual 'ignore_missing_imports' entries instead of using a global ignore.
+ '--config-file', config_path,
+ ]
+
+ cmd.extend(context_paths)
+
+ try:
+ stdout, stderr = intercept_python(args, virtualenv_python, cmd, env, capture=True)
+
+ if stdout or stderr:
+ raise SubprocessError(cmd, stdout=stdout, stderr=stderr)
+ except SubprocessError as ex:
+ if ex.status != 1 or ex.stderr or not ex.stdout:
+ raise
+
+ stdout = ex.stdout
+
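+ # each line of mypy output is expected to match this pattern; the column is optional, e.g. (illustrative):
+ # lib/ansible/foo.py:10:5: error: Incompatible types in assignment  [assignment]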
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):((?P<column>[0-9]+):)? (?P<level>[^:]+): (?P<message>.*)$'
+
+ parsed = parse_to_list_of_dict(pattern, stdout)
+
+ messages = [SanityMessage(
+ level=r['level'],
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=int(r.get('column') or '0'),
+ ) for r in parsed]
+
+ return messages
+
+
+@dataclasses.dataclass(frozen=True)
+class MyPyContext:
+ """Context details for a single run of mypy."""
+ name: str
+ paths: list[str]
+ python_versions: tuple[str, ...]
diff --git a/test/lib/ansible_test/_internal/commands/sanity/pep8.py b/test/lib/ansible_test/_internal/commands/sanity/pep8.py
new file mode 100644
index 0000000..5df9ace
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/pep8.py
@@ -0,0 +1,109 @@
+"""Sanity test for PEP 8 style guidelines using pycodestyle."""
+from __future__ import annotations
+
+import os
+import typing as t
+
+from . import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...target import (
+ TestTarget,
+)
+
+from ...util import (
+ SubprocessError,
+ read_lines_without_comments,
+ parse_to_list_of_dict,
+ is_subdir,
+)
+
+from ...util_common import (
+ run_command,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...host_configs import (
+ PythonConfig,
+)
+
+
+class Pep8Test(SanitySingleVersion):
+ """Sanity test for PEP 8 style guidelines using pycodestyle."""
+ @property
+ def error_code(self) -> t.Optional[str]:
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'A100'
+
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ current_ignore_file = os.path.join(SANITY_ROOT, 'pep8', 'current-ignore.txt')
+ current_ignore = sorted(read_lines_without_comments(current_ignore_file, remove_blank_lines=True))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [
+ python.path,
+ '-m', 'pycodestyle',
+ '--max-line-length', '160',
+ '--config', '/dev/null',
+ '--ignore', ','.join(sorted(current_ignore)),
+ ] + paths
+
+ if paths:
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ else:
+ stdout = None
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if stdout:
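+ # pycodestyle reports one message per line, e.g. (illustrative):
+ # lib/ansible/cli/foo.py:63:161: E501 line too long (170 > 160 characters)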
+ pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stdout)
+ else:
+ results = []
+
+ messages = [SanityMessage(
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ level='warning' if r['code'].startswith('W') else 'error',
+ code=r['code'],
+ ) for r in results]
+
+ errors = settings.process_errors(messages, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/commands/sanity/pslint.py b/test/lib/ansible_test/_internal/commands/sanity/pslint.py
new file mode 100644
index 0000000..9136d51
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/pslint.py
@@ -0,0 +1,119 @@
+"""Sanity test using PSScriptAnalyzer."""
+from __future__ import annotations
+
+import json
+import os
+import re
+import typing as t
+
+from . import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...target import (
+ TestTarget,
+)
+
+from ...util import (
+ SubprocessError,
+ find_executable,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from ...util_common import (
+ run_command,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...data import (
+ data_context,
+)
+
+
+class PslintTest(SanityVersionNeutral):
+ """Sanity test using PSScriptAnalyzer."""
+ @property
+ def error_code(self) -> t.Optional[str]:
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'AnsibleTest'
+
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1', '.psd1')]
+
+ def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if not find_executable('pwsh', required='warning'):
+ return SanitySkipped(self.name)
+
+ cmds = []
+
+ if args.controller.is_managed or args.requirements:
+ cmds.append(['pwsh', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'sanity.pslint.ps1')])
+
+ cmds.append(['pwsh', os.path.join(SANITY_ROOT, 'pslint', 'pslint.ps1')] + paths)
+
+ stdout = ''
+
+ for cmd in cmds:
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
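+ # PSScriptAnalyzer reports severity as an integer which is used below to index into this list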
+ severity = [
+ 'Information',
+ 'Warning',
+ 'Error',
+ 'ParseError',
+ ]
+
+ cwd = data_context().content.root + '/'
+
+ # replace unicode smart quotes and ellipsis with ascii versions
+ stdout = re.sub('[\u2018\u2019]', "'", stdout)
+ stdout = re.sub('[\u201c\u201d]', '"', stdout)
+ stdout = re.sub('[\u2026]', '...', stdout)
+
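+ # the pslint.ps1 wrapper emits a JSON list of PSScriptAnalyzer records, e.g. (illustrative):
+ # [{"RuleName": "PSAvoidUsingCmdletAliases", "Message": "...", "ScriptPath": "...", "Line": 3, "Column": 1, "Severity": 1}]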
+ messages = json.loads(stdout)
+
+ errors = [SanityMessage(
+ code=m['RuleName'],
+ message=m['Message'],
+ path=m['ScriptPath'].replace(cwd, ''),
+ line=m['Line'] or 0,
+ column=m['Column'] or 0,
+ level=severity[m['Severity']],
+ ) for m in messages]
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/commands/sanity/pylint.py b/test/lib/ansible_test/_internal/commands/sanity/pylint.py
new file mode 100644
index 0000000..86f287a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/pylint.py
@@ -0,0 +1,270 @@
+"""Sanity test using pylint."""
+from __future__ import annotations
+
+import collections.abc as c
+import itertools
+import json
+import os
+import datetime
+import configparser
+import typing as t
+
+from . import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...target import (
+ TestTarget,
+)
+
+from ...util import (
+ SubprocessError,
+ display,
+ is_subdir,
+)
+
+from ...util_common import (
+ run_command,
+)
+
+from ...ansible_util import (
+ ansible_environment,
+ get_collection_detail,
+ CollectionDetail,
+ CollectionDetailError,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...host_configs import (
+ PythonConfig,
+)
+
+
+class PylintTest(SanitySingleVersion):
+ """Sanity test using pylint."""
+ def __init__(self) -> None:
+ super().__init__()
+ self.optional_error_codes.update([
+ 'ansible-deprecated-date',
+ 'too-complex',
+ ])
+
+ @property
+ def error_code(self) -> t.Optional[str]:
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'ansible-test'
+
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ plugin_dir = os.path.join(SANITY_ROOT, 'pylint', 'plugins')
+ plugin_names = sorted(p[0] for p in [
+ os.path.splitext(n) for n in os.listdir(plugin_dir)] if p[1] == '.py' and p[0] != '__init__')
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ module_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in
+ paths if is_subdir(p, data_context().content.module_path)]
+ module_dirs = sorted({p[0] for p in module_paths if len(p) > 1})
+
+ large_module_group_threshold = 500
+ large_module_groups = [key for key, value in
+ itertools.groupby(module_paths, lambda p: p[0] if len(p) > 1 else '') if len(list(value)) > large_module_group_threshold]
+
+ large_module_group_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in paths
+ if any(is_subdir(p, os.path.join(data_context().content.module_path, g)) for g in large_module_groups)]
+ large_module_group_dirs = sorted({os.path.sep.join(p[:2]) for p in large_module_group_paths if len(p) > 2})
+
+ contexts = []
+ remaining_paths = set(paths)
+
+ def add_context(available_paths: set[str], context_name: str, context_filter: c.Callable[[str], bool]) -> None:
+ """Add the specified context to the context list, consuming available paths that match the given context filter."""
+ filtered_paths = set(p for p in available_paths if context_filter(p))
+ contexts.append((context_name, sorted(filtered_paths)))
+ available_paths -= filtered_paths
+
+ def filter_path(path_filter: str = None) -> c.Callable[[str], bool]:
+ """Return a function that filters out paths which are not a subdirectory of the given path."""
+ def context_filter(path_to_filter: str) -> bool:
+ """Return true if the given path matches, otherwise return False."""
+ return is_subdir(path_to_filter, path_filter)
+
+ return context_filter
+
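+ # contexts are added from most specific to least specific; each add_context() call consumes
+ # the paths it matches, so the catch-all contexts added last only see unclaimed paths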
+ for large_module_dir in large_module_group_dirs:
+ add_context(remaining_paths, 'modules/%s' % large_module_dir, filter_path(os.path.join(data_context().content.module_path, large_module_dir)))
+
+ for module_dir in module_dirs:
+ add_context(remaining_paths, 'modules/%s' % module_dir, filter_path(os.path.join(data_context().content.module_path, module_dir)))
+
+ add_context(remaining_paths, 'modules', filter_path(data_context().content.module_path))
+ add_context(remaining_paths, 'module_utils', filter_path(data_context().content.module_utils_path))
+
+ add_context(remaining_paths, 'units', filter_path(data_context().content.unit_path))
+
+ if data_context().content.collection:
+ add_context(remaining_paths, 'collection', lambda p: True)
+ else:
+ add_context(remaining_paths, 'validate-modules', filter_path('test/lib/ansible_test/_util/controller/sanity/validate-modules/'))
+ add_context(remaining_paths, 'validate-modules-unit', filter_path('test/lib/ansible_test/tests/validate-modules-unit/'))
+ add_context(remaining_paths, 'code-smell', filter_path('test/lib/ansible_test/_util/controller/sanity/code-smell/'))
+ add_context(remaining_paths, 'ansible-test-target', filter_path('test/lib/ansible_test/_util/target/'))
+ add_context(remaining_paths, 'ansible-test', filter_path('test/lib/'))
+ add_context(remaining_paths, 'test', filter_path('test/'))
+ add_context(remaining_paths, 'hacking', filter_path('hacking/'))
+ add_context(remaining_paths, 'ansible', lambda p: True)
+
+ messages = []
+ context_times = []
+
+ collection_detail = None
+
+ if data_context().content.collection:
+ try:
+ collection_detail = get_collection_detail(python)
+
+ if not collection_detail.version:
+ display.warning('Skipping pylint collection version checks since no collection version was found.')
+ except CollectionDetailError as ex:
+ display.warning('Skipping pylint collection version checks since collection detail loading failed: %s' % ex.reason)
+
+ test_start = datetime.datetime.utcnow()
+
+ for context, context_paths in sorted(contexts):
+ if not context_paths:
+ continue
+
+ context_start = datetime.datetime.utcnow()
+ messages += self.pylint(args, context, context_paths, plugin_dir, plugin_names, python, collection_detail)
+ context_end = datetime.datetime.utcnow()
+
+ context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))
+
+ test_end = datetime.datetime.utcnow()
+
+ for context_time in context_times:
+ display.info(context_time, verbosity=4)
+
+ display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)
+
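+ # each record in the pylint JSON output provides at least: path, line, column, type and symbol, e.g. (illustrative):
+ # {"type": "warning", "symbol": "unused-import", "path": "lib/ansible/foo.py", "line": 1, "column": 0, "message": "Unused import os"}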
+ errors = [SanityMessage(
+ message=m['message'].replace('\n', ' '),
+ path=m['path'],
+ line=int(m['line']),
+ column=int(m['column']),
+ level=m['type'],
+ code=m['symbol'],
+ ) for m in messages]
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
+
+ @staticmethod
+ def pylint(
+ args: SanityConfig,
+ context: str,
+ paths: list[str],
+ plugin_dir: str,
+ plugin_names: list[str],
+ python: PythonConfig,
+ collection_detail: CollectionDetail,
+ ) -> list[dict[str, str]]:
+ """Run pylint using the config specified by the context on the specified paths."""
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', context.split('/')[0] + '.cfg')
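+ # e.g. (illustrative) the context "modules/cloud" looks for "modules.cfg"; a missing config falls back to the defaults below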
+
+ if not os.path.exists(rcfile):
+ if data_context().content.collection:
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'collection.cfg')
+ else:
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'default.cfg')
+
+ parser = configparser.ConfigParser()
+ parser.read(rcfile)
+
+ if parser.has_section('ansible-test'):
+ config = dict(parser.items('ansible-test'))
+ else:
+ config = {}
+
+ disable_plugins = set(i.strip() for i in config.get('disable-plugins', '').split(',') if i)
+ load_plugins = set(plugin_names + ['pylint.extensions.mccabe']) - disable_plugins
+
+ cmd = [
+ python.path,
+ '-m', 'pylint',
+ '--jobs', '0',
+ '--reports', 'n',
+ '--max-line-length', '160',
+ '--max-complexity', '20',
+ '--rcfile', rcfile,
+ '--output-format', 'json',
+ '--load-plugins', ','.join(sorted(load_plugins)),
+ ] + paths
+
+ if data_context().content.collection:
+ cmd.extend(['--collection-name', data_context().content.collection.full_name])
+
+ if collection_detail and collection_detail.version:
+ cmd.extend(['--collection-version', collection_detail.version])
+
+ append_python_path = [plugin_dir]
+
+ if data_context().content.collection:
+ append_python_path.append(data_context().content.collection.root)
+
+ env = ansible_environment(args)
+ env['PYTHONPATH'] += os.path.pathsep + os.path.pathsep.join(append_python_path)
+
+ # expose plugin paths for use in custom plugins
+ env.update(dict(('ANSIBLE_TEST_%s_PATH' % k.upper(), os.path.abspath(v) + os.path.sep) for k, v in data_context().content.plugin_paths.items()))
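+ # e.g. (illustrative) the "module_utils" plugin path is exposed as ANSIBLE_TEST_MODULE_UTILS_PATH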
+
+ if paths:
+ display.info('Checking %d file(s) in context "%s" with config: %s' % (len(paths), context, rcfile), verbosity=1)
+
+ try:
+ stdout, stderr = run_command(args, cmd, env=env, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr or status >= 32:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ else:
+ stdout = None
+
+ if not args.explain and stdout:
+ messages = json.loads(stdout)
+ else:
+ messages = []
+
+ return messages
diff --git a/test/lib/ansible_test/_internal/commands/sanity/sanity_docs.py b/test/lib/ansible_test/_internal/commands/sanity/sanity_docs.py
new file mode 100644
index 0000000..4f14a3a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/sanity_docs.py
@@ -0,0 +1,60 @@
+"""Sanity test for documentation of sanity tests."""
+from __future__ import annotations
+
+import os
+
+from . import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ sanity_get_tests,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...data import (
+ data_context,
+)
+
+
+class SanityDocsTest(SanityVersionNeutral):
+ """Sanity test for documentation of sanity tests."""
+ ansible_only = True
+
+ @property
+ def can_ignore(self) -> bool:
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self) -> bool:
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
+ sanity_dir = 'docs/docsite/rst/dev_guide/testing/sanity'
+ sanity_docs = set(part[0] for part in (os.path.splitext(os.path.basename(path)) for path in data_context().content.get_files(sanity_dir))
+ if part[1] == '.rst')
+ sanity_tests = set(sanity_test.name for sanity_test in sanity_get_tests())
+
+ missing = sanity_tests - sanity_docs
+
+ results = []
+
+ results += [SanityMessage(
+ message='missing docs for ansible-test sanity --test %s' % r,
+ path=os.path.join(sanity_dir, '%s.rst' % r),
+ ) for r in sorted(missing)]
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/commands/sanity/shellcheck.py b/test/lib/ansible_test/_internal/commands/sanity/shellcheck.py
new file mode 100644
index 0000000..7de0bda
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/shellcheck.py
@@ -0,0 +1,108 @@
+"""Sanity test using shellcheck."""
+from __future__ import annotations
+
+import os
+import typing as t
+
+from xml.etree.ElementTree import (
+ fromstring,
+ Element,
+)
+
+from . import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...target import (
+ TestTarget,
+)
+
+from ...util import (
+ SubprocessError,
+ read_lines_without_comments,
+ find_executable,
+)
+
+from ...util_common import (
+ run_command,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+
+class ShellcheckTest(SanityVersionNeutral):
+ """Sanity test using shellcheck."""
+ @property
+ def error_code(self) -> t.Optional[str]:
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'AT1000'
+
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.sh']
+
+ def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
+ exclude_file = os.path.join(SANITY_ROOT, 'shellcheck', 'exclude.txt')
+ exclude = set(read_lines_without_comments(exclude_file, remove_blank_lines=True, optional=True))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if not find_executable('shellcheck', required='warning'):
+ return SanitySkipped(self.name)
+
+ cmd = [
+ 'shellcheck',
+ '-e', ','.join(sorted(exclude)),
+ '--format', 'checkstyle',
+ ] + paths
+
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr or status > 1:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ # json output is missing file paths in older versions of shellcheck, so we'll use xml instead
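+ # e.g. (illustrative): <checkstyle><file name='test/foo.sh'>
+ # <error line='3' column='8' severity='warning' message='...' source='ShellCheck.SC2034'/></file></checkstyle>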
+ root: Element = fromstring(stdout)
+
+ results = []
+
+ for item in root:
+ for entry in item:
+ results.append(SanityMessage(
+ message=entry.attrib['message'],
+ path=item.attrib['name'],
+ line=int(entry.attrib['line']),
+ column=int(entry.attrib['column']),
+ level=entry.attrib['severity'],
+ code=entry.attrib['source'].replace('ShellCheck.', ''),
+ ))
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py b/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py
new file mode 100644
index 0000000..e1dacb7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/validate_modules.py
@@ -0,0 +1,190 @@
+"""Sanity test using validate-modules."""
+from __future__ import annotations
+
+import collections
+import json
+import os
+import typing as t
+
+from . import (
+ DOCUMENTABLE_PLUGINS,
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...target import (
+ TestTarget,
+)
+
+from ...util import (
+ SubprocessError,
+ display,
+)
+
+from ...util_common import (
+ run_command,
+)
+
+from ...ansible_util import (
+ ansible_environment,
+ get_collection_detail,
+ CollectionDetailError,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...ci import (
+ get_ci_provider,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...host_configs import (
+ PythonConfig,
+)
+
+
+class ValidateModulesTest(SanitySingleVersion):
+ """Sanity test using validate-modules."""
+
+ def __init__(self) -> None:
+ super().__init__()
+
+ self.optional_error_codes.update([
+ 'deprecated-date',
+ ])
+
+ self._prefixes = {
+ plugin_type: plugin_path + '/'
+ for plugin_type, plugin_path in data_context().content.plugin_paths.items()
+ if plugin_type in DOCUMENTABLE_PLUGINS
+ }
+
+ self._exclusions = set()
+
+ if not data_context().content.collection:
+ self._exclusions.add('lib/ansible/plugins/cache/base.py')
+
+ @property
+ def error_code(self) -> t.Optional[str]:
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'A100'
+
+ def get_plugin_type(self, target: TestTarget) -> t.Optional[str]:
+ """Return the plugin type of the given target, or None if it is not a plugin or module."""
+ if target.path.endswith('/__init__.py'):
+ return None
+
+ if target.path in self._exclusions:
+ return None
+
+ for plugin_type, prefix in self._prefixes.items():
+ if target.path.startswith(prefix):
+ return plugin_type
+
+ return None
+
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if self.get_plugin_type(target) is not None]
+
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ env = ansible_environment(args, color=False)
+
+ settings = self.load_processor(args)
+
+ target_per_type = collections.defaultdict(list)
+
+ for target in targets.include:
+ target_per_type[self.get_plugin_type(target)].append(target)
+
+ cmd = [
+ python.path,
+ os.path.join(SANITY_ROOT, 'validate-modules', 'validate.py'),
+ '--format', 'json',
+ '--arg-spec',
+ ]
+
+ if data_context().content.collection:
+ cmd.extend(['--collection', data_context().content.collection.directory])
+
+ try:
+ collection_detail = get_collection_detail(python)
+
+ if collection_detail.version:
+ cmd.extend(['--collection-version', collection_detail.version])
+ else:
+ display.warning('Skipping validate-modules collection version checks since no collection version was found.')
+ except CollectionDetailError as ex:
+ display.warning('Skipping validate-modules collection version checks since collection detail loading failed: %s' % ex.reason)
+ else:
+ base_branch = args.base_branch or get_ci_provider().get_base_branch()
+
+ if base_branch:
+ cmd.extend([
+ '--base-branch', base_branch,
+ ])
+ else:
+ display.warning('Cannot perform module comparison against the base branch because the base branch was not detected.')
+
+ errors = []
+
+ for plugin_type, plugin_targets in sorted(target_per_type.items()):
+ paths = [target.path for target in plugin_targets]
+ plugin_cmd = list(cmd)
+
+ if plugin_type != 'modules':
+ plugin_cmd += ['--plugin-type', plugin_type]
+
+ plugin_cmd += paths
+
+ try:
+ stdout, stderr = run_command(args, plugin_cmd, env=env, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr or status not in (0, 3):
+ raise SubprocessError(cmd=plugin_cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ continue
+
+ messages = json.loads(stdout)
+
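+ # the validate-modules report maps each file name to its findings, e.g. (illustrative):
+ # {"lib/ansible/modules/ping.py": {"errors": [{"code": "...", "msg": "...", "line": 1, "column": 1}]}}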
+ for filename in messages:
+ output = messages[filename]
+
+ for item in output['errors']:
+ errors.append(SanityMessage(
+ path=filename,
+ line=int(item['line']) if 'line' in item else 0,
+ column=int(item['column']) if 'column' in item else 0,
+ code='%s' % item['code'],
+ message=item['msg'],
+ ))
+
+ all_paths = [target.path for target in targets.include]
+ all_errors = settings.process_errors(errors, all_paths)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if all_errors:
+ return SanityFailure(self.name, messages=all_errors)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/commands/sanity/yamllint.py b/test/lib/ansible_test/_internal/commands/sanity/yamllint.py
new file mode 100644
index 0000000..a0d859f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/sanity/yamllint.py
@@ -0,0 +1,125 @@
+"""Sanity test using yamllint."""
+from __future__ import annotations
+
+import json
+import os
+import typing as t
+
+from . import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ...test import (
+ TestResult,
+)
+
+from ...target import (
+ TestTarget,
+)
+
+from ...util import (
+ SubprocessError,
+ display,
+ is_subdir,
+)
+
+from ...util_common import (
+ run_command,
+)
+
+from ...config import (
+ SanityConfig,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...host_configs import (
+ PythonConfig,
+)
+
+
+class YamllintTest(SanitySingleVersion):
+ """Sanity test using yamllint."""
+ @property
+ def error_code(self) -> t.Optional[str]:
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'ansible-test'
+
+ @property
+ def require_libyaml(self) -> bool:
+ """True if the test requires PyYAML to have libyaml support."""
+ return True
+
+ def filter_targets(self, targets: list[TestTarget]) -> list[TestTarget]:
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ yaml_targets = [target for target in targets if os.path.splitext(target.path)[1] in ('.yml', '.yaml')]
+
+ for plugin_type, plugin_path in sorted(data_context().content.plugin_paths.items()):
+ if plugin_type == 'module_utils':
+ continue
+
+ yaml_targets.extend([target for target in targets if
+ os.path.splitext(target.path)[1] == '.py' and
+ os.path.basename(target.path) != '__init__.py' and
+ is_subdir(target.path, plugin_path)])
+
+ return yaml_targets
+
+ def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ results = self.test_paths(args, paths, python)
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
+
+ @staticmethod
+ def test_paths(args: SanityConfig, paths: list[str], python: PythonConfig) -> list[SanityMessage]:
+ """Test the specified paths using the given Python and return the results."""
+ cmd = [
+ python.path,
+ os.path.join(SANITY_ROOT, 'yamllint', 'yamllinter.py'),
+ ]
+
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ try:
+ stdout, stderr = run_command(args, cmd, data=data, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return []
+
+ results = json.loads(stdout)['messages']
+
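+ # each reported message provides code, message, path, line, column and level, e.g. (illustrative):
+ # {"code": "...", "message": "...", "path": "test/foo.yml", "line": 1, "column": 1, "level": "error"}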
+ results = [SanityMessage(
+ code=r['code'],
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ level=r['level'],
+ ) for r in results]
+
+ return results
diff --git a/test/lib/ansible_test/_internal/commands/shell/__init__.py b/test/lib/ansible_test/_internal/commands/shell/__init__.py
new file mode 100644
index 0000000..5e8c101
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/shell/__init__.py
@@ -0,0 +1,135 @@
+"""Open a shell prompt inside an ansible-test environment."""
+from __future__ import annotations
+
+import os
+import sys
+import typing as t
+
+from ...util import (
+ ApplicationError,
+ OutputStream,
+ display,
+ SubprocessError,
+ HostConnectionError,
+)
+
+from ...config import (
+ ShellConfig,
+)
+
+from ...executor import (
+ Delegate,
+)
+
+from ...connections import (
+ Connection,
+ LocalConnection,
+ SshConnection,
+)
+
+from ...host_profiles import (
+ ControllerProfile,
+ PosixProfile,
+ SshTargetHostProfile,
+)
+
+from ...provisioning import (
+ prepare_profiles,
+)
+
+from ...host_configs import (
+ ControllerConfig,
+ OriginConfig,
+)
+
+from ...inventory import (
+ create_controller_inventory,
+ create_posix_inventory,
+)
+
+
+def command_shell(args: ShellConfig) -> None:
+ """Entry point for the `shell` command."""
+ if args.raw and isinstance(args.targets[0], ControllerConfig):
+ raise ApplicationError('The --raw option has no effect on the controller.')
+
+ if not args.export and not args.cmd and not sys.stdin.isatty():
+ raise ApplicationError('Standard input must be a TTY to launch a shell.')
+
+ host_state = prepare_profiles(args, skip_setup=args.raw) # shell
+
+ if args.delegate:
+ raise Delegate(host_state=host_state)
+
+ if args.raw and not isinstance(args.controller, OriginConfig):
+ display.warning('The --raw option will only be applied to the target.')
+
+ target_profile = t.cast(SshTargetHostProfile, host_state.target_profiles[0])
+
+ if isinstance(target_profile, ControllerProfile):
+ # run the shell locally unless a target was requested
+ con: Connection = LocalConnection(args)
+
+ if args.export:
+ display.info('Configuring controller inventory.', verbosity=1)
+ create_controller_inventory(args, args.export, host_state.controller_profile)
+ else:
+ # a target was requested, connect to it over SSH
+ con = target_profile.get_controller_target_connections()[0]
+
+ if args.export:
+ display.info('Configuring target inventory.', verbosity=1)
+ create_posix_inventory(args, args.export, host_state.target_profiles, True)
+
+ if args.export:
+ return
+
+ if args.cmd:
+ # Running a command is assumed to be non-interactive. Only a shell (no command) is interactive.
+ # If we want to support interactive commands in the future, we'll need an `--interactive` command line option.
+ # Command stderr output is allowed to mix with our own output, which is all sent to stderr.
+ con.run(args.cmd, capture=False, interactive=False, output_stream=OutputStream.ORIGINAL)
+ return
+
+ if isinstance(con, SshConnection) and args.raw:
+ cmd: list[str] = []
+ elif isinstance(target_profile, PosixProfile):
+ cmd = []
+
+ if args.raw:
+ shell = 'sh' # shell required for non-ssh connection
+ else:
+ shell = 'bash'
+
+ python = target_profile.python # make sure the python interpreter has been initialized before opening a shell
+ display.info(f'Target Python {python.version} is at: {python.path}')
+
+ optional_vars = (
+ 'TERM', # keep backspace working
+ )
+
+ env = {name: os.environ[name] for name in optional_vars if name in os.environ}
+
+ if env:
+ cmd = ['/usr/bin/env'] + [f'{name}={value}' for name, value in env.items()]
+
+ cmd += [shell, '-i']
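+ # the resulting command is, e.g. (illustrative): ['/usr/bin/env', 'TERM=xterm-256color', 'bash', '-i']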
+ else:
+ cmd = []
+
+ try:
+ con.run(cmd, capture=False, interactive=True)
+ except SubprocessError as ex:
+ if isinstance(con, SshConnection) and ex.status == 255:
+ # 255 indicates SSH itself failed, rather than a command run on the remote host.
+ # In this case, report a host connection error so additional troubleshooting output is provided.
+ if not args.delegate and not args.host_path:
+ def callback() -> None:
+ """Callback to run during error display."""
+ target_profile.on_target_failure() # when the controller is not delegated, report failures immediately
+ else:
+ callback = None
+
+ raise HostConnectionError(f'SSH shell connection failed for host {target_profile.config}: {ex}', callback) from ex
+
+ raise
diff --git a/test/lib/ansible_test/_internal/commands/units/__init__.py b/test/lib/ansible_test/_internal/commands/units/__init__.py
new file mode 100644
index 0000000..f666d41
--- /dev/null
+++ b/test/lib/ansible_test/_internal/commands/units/__init__.py
@@ -0,0 +1,343 @@
+"""Execute unit tests using pytest."""
+from __future__ import annotations
+
+import os
+import sys
+import typing as t
+
+from ...constants import (
+ CONTROLLER_MIN_PYTHON_VERSION,
+ CONTROLLER_PYTHON_VERSIONS,
+ REMOTE_ONLY_PYTHON_VERSIONS,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from ...io import (
+ write_text_file,
+ make_dirs,
+)
+
+from ...util import (
+ ANSIBLE_TEST_DATA_ROOT,
+ display,
+ is_subdir,
+ str_to_version,
+ SubprocessError,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_TARGET_ROOT,
+)
+
+from ...util_common import (
+ ResultType,
+ handle_layout_messages,
+ create_temp_dir,
+)
+
+from ...ansible_util import (
+ ansible_environment,
+ get_ansible_python_path,
+)
+
+from ...target import (
+ walk_internal_targets,
+ walk_units_targets,
+)
+
+from ...config import (
+ UnitsConfig,
+)
+
+from ...coverage_util import (
+ cover_python,
+)
+
+from ...data import (
+ data_context,
+)
+
+from ...executor import (
+ AllTargetsSkipped,
+ Delegate,
+ get_changes_filter,
+)
+
+from ...python_requirements import (
+ install_requirements,
+)
+
+from ...content_config import (
+ get_content_config,
+)
+
+from ...host_configs import (
+ PosixConfig,
+)
+
+from ...provisioning import (
+ prepare_profiles,
+)
+
+from ...pypi_proxy import (
+ configure_pypi_proxy,
+)
+
+from ...host_profiles import (
+ PosixProfile,
+)
+
+
+class TestContext:
+ """Contexts that unit tests run in based on the type of content."""
+ controller = 'controller'
+ modules = 'modules'
+ module_utils = 'module_utils'
+
+
+def command_units(args: UnitsConfig) -> None:
+ """Run unit tests."""
+ handle_layout_messages(data_context().content.unit_messages)
+
+ changes = get_changes_filter(args)
+ require = args.require + changes
+ include = walk_internal_targets(walk_units_targets(), args.include, args.exclude, require)
+
+ paths = [target.path for target in include]
+
+ content_config = get_content_config(args)
+ supported_remote_python_versions = content_config.modules.python_versions
+
+ if content_config.modules.controller_only:
+ # controller-only collections run modules/module_utils unit tests as controller-only tests
+ module_paths = []
+ module_utils_paths = []
+ else:
+ # normal collections run modules/module_utils unit tests isolated from controller code due to differences in python version requirements
+ module_paths = [path for path in paths if is_subdir(path, data_context().content.unit_module_path)]
+ module_utils_paths = [path for path in paths if is_subdir(path, data_context().content.unit_module_utils_path)]
+
+ controller_paths = sorted(path for path in set(paths) - set(module_paths) - set(module_utils_paths))
+
+ remote_paths = module_paths or module_utils_paths
+
+ test_context_paths = {
+ TestContext.modules: module_paths,
+ TestContext.module_utils: module_utils_paths,
+ TestContext.controller: controller_paths,
+ }
+
+ if not paths:
+ raise AllTargetsSkipped()
+
+ targets = t.cast(list[PosixConfig], args.targets)
+ target_versions: dict[str, PosixConfig] = {target.python.version: target for target in targets}
+ skipped_versions = args.host_settings.skipped_python_versions
+ warn_versions = []
+
+ # requested python versions that are remote-only and not supported by this collection
+ test_versions = [version for version in target_versions if version in REMOTE_ONLY_PYTHON_VERSIONS and version not in supported_remote_python_versions]
+
+ if test_versions:
+ for version in test_versions:
+ display.warning(f'Skipping unit tests on Python {version} because it is not supported by this collection.'
+ f' Supported Python versions are: {", ".join(content_config.python_versions)}')
+
+ warn_versions.extend(test_versions)
+
+ if warn_versions == list(target_versions):
+ raise AllTargetsSkipped()
+
+ if not remote_paths:
+ # all selected unit tests are controller tests
+
+ # requested python versions that are remote-only
+ test_versions = [version for version in target_versions if version in REMOTE_ONLY_PYTHON_VERSIONS and version not in warn_versions]
+
+ if test_versions:
+ for version in test_versions:
+ display.warning(f'Skipping unit tests on Python {version} because it is only supported by module/module_utils unit tests.'
+ ' No module/module_utils unit tests were selected.')
+
+ warn_versions.extend(test_versions)
+
+ if warn_versions == list(target_versions):
+ raise AllTargetsSkipped()
+
+ if not controller_paths:
+ # all selected unit tests are remote tests
+
+ # requested python versions that are not supported by remote tests for this collection
+ test_versions = [version for version in target_versions if version not in supported_remote_python_versions and version not in warn_versions]
+
+ if test_versions:
+ for version in test_versions:
+ display.warning(f'Skipping unit tests on Python {version} because it is not supported by module/module_utils unit tests of this collection.'
+ f' Supported Python versions are: {", ".join(supported_remote_python_versions)}')
+
+ warn_versions.extend(test_versions)
+
+ if warn_versions == list(target_versions):
+ raise AllTargetsSkipped()
+
+ host_state = prepare_profiles(args, targets_use_pypi=True) # units
+
+ if args.delegate:
+ raise Delegate(host_state=host_state, require=changes, exclude=args.exclude)
+
+ test_sets = []
+
+ if args.requirements_mode != 'skip':
+ configure_pypi_proxy(args, host_state.controller_profile) # units
+
+ for version in SUPPORTED_PYTHON_VERSIONS:
+ if version not in target_versions and version not in skipped_versions:
+ continue
+
+ test_candidates = []
+
+ for test_context, paths in test_context_paths.items():
+ if test_context == TestContext.controller:
+ if version not in CONTROLLER_PYTHON_VERSIONS:
+ continue
+ else:
+ if version not in supported_remote_python_versions:
+ continue
+
+ if not paths:
+ continue
+
+ env = ansible_environment(args)
+
+ env.update(
+ PYTHONPATH=get_units_ansible_python_path(args, test_context),
+ ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION=CONTROLLER_MIN_PYTHON_VERSION,
+ )
+
+ test_candidates.append((test_context, paths, env))
+
+ if not test_candidates:
+ continue
+
+ if version in skipped_versions:
+ display.warning("Skipping unit tests on Python %s because it could not be found." % version)
+ continue
+
+ target_profiles: dict[str, PosixProfile] = {profile.config.python.version: profile for profile in host_state.targets(PosixProfile)}
+ target_profile = target_profiles[version]
+
+ final_candidates = [(test_context, target_profile.python, paths, env) for test_context, paths, env in test_candidates]
+ controller = any(test_context == TestContext.controller for test_context, python, paths, env in final_candidates)
+
+ if args.requirements_mode != 'skip':
+ install_requirements(args, target_profile.python, ansible=controller, command=True, controller=False) # units
+
+ test_sets.extend(final_candidates)
+
+ if args.requirements_mode == 'only':
+ sys.exit()
+
+ for test_context, python, paths, env in test_sets:
+ # When using pytest-mock, make sure that features introduced in Python 3.8 are available to older Python versions.
+ # This is done by enabling the mock_use_standalone_module feature, which forces use of mock even when unittest.mock is available.
+ # Later Python versions have not introduced additional unittest.mock features, so use of mock is not needed as of Python 3.8.
+ # If future Python versions introduce new unittest.mock features, they will not be available to older Python versions.
+ # Having the cutoff at Python 3.8 also eases packaging of ansible-core since no supported controller version requires the use of mock.
+ #
+ # NOTE: This only affects use of pytest-mock.
+ # Collection unit tests may directly import mock, which will be provided by ansible-test when it installs requirements using pip.
+ # Although mock is available for ansible-core unit tests, they should import units.compat.mock instead.
+ if str_to_version(python.version) < (3, 8):
+ config_name = 'legacy.ini'
+ else:
+ config_name = 'default.ini'
+
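+ # the pytest-forked plugin ('--forked') runs each test in a forked subprocess,
+ # isolating tests which crash or leak global state from the rest of the run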
+ cmd = [
+ 'pytest',
+ '--forked',
+ '-r', 'a',
+ '-n', str(args.num_workers) if args.num_workers else 'auto',
+ '--color', 'yes' if args.color else 'no',
+ '-p', 'no:cacheprovider',
+ '-c', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest', 'config', config_name),
+ '--junit-xml', os.path.join(ResultType.JUNIT.path, 'python%s-%s-units.xml' % (python.version, test_context)),
+ '--strict-markers', # added in pytest 4.5.0
+ '--rootdir', data_context().content.root,
+ ]
+
+ if not data_context().content.collection:
+ cmd.append('--durations=25')
+
+ plugins = []
+
+ if args.coverage:
+ plugins.append('ansible_pytest_coverage')
+
+ if data_context().content.collection:
+ plugins.append('ansible_pytest_collections')
+
+ if plugins:
+ env['PYTHONPATH'] += ':%s' % os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'pytest/plugins')
+ env['PYTEST_PLUGINS'] = ','.join(plugins)
+
+ if args.collect_only:
+ cmd.append('--collect-only')
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ cmd.extend(paths)
+
+ display.info('Unit test %s with Python %s' % (test_context, python.version))
+
+ try:
+ cover_python(args, python, cmd, test_context, env, capture=False)
+ except SubprocessError as ex:
+ # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case
+ if ex.status != 5:
+ raise
+
+
+def get_units_ansible_python_path(args: UnitsConfig, test_context: str) -> str:
+ """
+ Return a directory usable for PYTHONPATH, containing only the modules and module_utils portion of the ansible package.
+ The temporary directory created will be cached for the lifetime of the process and cleaned up at exit.
+ """
+ if test_context == TestContext.controller:
+ return get_ansible_python_path(args)
+
+ try:
+ cache = get_units_ansible_python_path.cache # type: ignore[attr-defined]
+ except AttributeError:
+ cache = get_units_ansible_python_path.cache = {} # type: ignore[attr-defined]
+
+ python_path = cache.get(test_context)
+
+ if python_path:
+ return python_path
+
+ python_path = create_temp_dir(prefix='ansible-test-')
+ ansible_path = os.path.join(python_path, 'ansible')
+ ansible_test_path = os.path.join(python_path, 'ansible_test')
+
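+ # the resulting layout is, e.g. (illustrative):
+ # {temp}/ansible/__init__.py (empty)
+ # {temp}/ansible/module_utils -> {ANSIBLE_LIB_ROOT}/module_utils
+ # plus 'modules' or collection loader entries below, depending on the test context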
+ write_text_file(os.path.join(ansible_path, '__init__.py'), '', True)
+ os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'module_utils'), os.path.join(ansible_path, 'module_utils'))
+
+ if data_context().content.collection:
+ # built-in runtime configuration for the collection loader
+ make_dirs(os.path.join(ansible_path, 'config'))
+ os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'config', 'ansible_builtin_runtime.yml'), os.path.join(ansible_path, 'config', 'ansible_builtin_runtime.yml'))
+
+ # current collection loader required by all python versions supported by the controller
+ write_text_file(os.path.join(ansible_path, 'utils', '__init__.py'), '', True)
+ os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'utils', 'collection_loader'), os.path.join(ansible_path, 'utils', 'collection_loader'))
+
+ # legacy collection loader required by all python versions not supported by the controller
+ write_text_file(os.path.join(ansible_test_path, '__init__.py'), '', True)
+ write_text_file(os.path.join(ansible_test_path, '_internal', '__init__.py'), '', True)
+ elif test_context == TestContext.modules:
+ # only non-collection ansible module tests should have access to ansible built-in modules
+ os.symlink(os.path.join(ANSIBLE_LIB_ROOT, 'modules'), os.path.join(ansible_path, 'modules'))
+
+ cache[test_context] = python_path
+
+ return python_path
diff --git a/test/lib/ansible_test/_internal/compat/__init__.py b/test/lib/ansible_test/_internal/compat/__init__.py
new file mode 100644
index 0000000..e9cb681
--- /dev/null
+++ b/test/lib/ansible_test/_internal/compat/__init__.py
@@ -0,0 +1,2 @@
+"""Nearly empty __init__.py to keep pylint happy."""
+from __future__ import annotations
diff --git a/test/lib/ansible_test/_internal/compat/packaging.py b/test/lib/ansible_test/_internal/compat/packaging.py
new file mode 100644
index 0000000..92e7736
--- /dev/null
+++ b/test/lib/ansible_test/_internal/compat/packaging.py
@@ -0,0 +1,18 @@
+"""Packaging compatibility."""
+from __future__ import annotations
+
+import typing as t
+
+try:
+ from packaging import (
+ specifiers,
+ version,
+ )
+
+ SpecifierSet: t.Optional[t.Type[specifiers.SpecifierSet]] = specifiers.SpecifierSet
+ Version: t.Optional[t.Type[version.Version]] = version.Version
+ PACKAGING_IMPORT_ERROR = None
+except ImportError as ex:
+ SpecifierSet = None # pylint: disable=invalid-name
+ Version = None # pylint: disable=invalid-name
+ PACKAGING_IMPORT_ERROR = ex
diff --git a/test/lib/ansible_test/_internal/compat/yaml.py b/test/lib/ansible_test/_internal/compat/yaml.py
new file mode 100644
index 0000000..4b47136
--- /dev/null
+++ b/test/lib/ansible_test/_internal/compat/yaml.py
@@ -0,0 +1,22 @@
+"""PyYAML compatibility."""
+from __future__ import annotations
+
+import typing as t
+
+from functools import (
+ partial,
+)
+
+try:
+ import yaml as _yaml
+ YAML_IMPORT_ERROR = None
+except ImportError as ex:
+ yaml_load = None # pylint: disable=invalid-name
+ YAML_IMPORT_ERROR = ex
+else:
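+ # prefer the libyaml-backed CSafeLoader when PyYAML was built with libyaml support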
+ try:
+ _SafeLoader: t.Union[t.Type[_yaml.CSafeLoader], t.Type[_yaml.SafeLoader]] = _yaml.CSafeLoader
+ except AttributeError:
+ _SafeLoader = _yaml.SafeLoader
+
+ yaml_load = partial(_yaml.load, Loader=_SafeLoader)
diff --git a/test/lib/ansible_test/_internal/completion.py b/test/lib/ansible_test/_internal/completion.py
new file mode 100644
index 0000000..f443181
--- /dev/null
+++ b/test/lib/ansible_test/_internal/completion.py
@@ -0,0 +1,310 @@
+"""Loading, parsing and storing of completion configurations."""
+from __future__ import annotations
+
+import abc
+import dataclasses
+import enum
+import os
+import typing as t
+
+from .constants import (
+ CONTROLLER_PYTHON_VERSIONS,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from .util import (
+ ANSIBLE_TEST_DATA_ROOT,
+ cache,
+ read_lines_without_comments,
+)
+
+from .data import (
+ data_context,
+)
+
+from .become import (
+ SUPPORTED_BECOME_METHODS,
+)
+
+
+class CGroupVersion(enum.Enum):
+ """The control group version(s) required by a container."""
+ NONE = 'none'
+ V1_ONLY = 'v1-only'
+ V2_ONLY = 'v2-only'
+ V1_V2 = 'v1-v2'
+
+ def __repr__(self) -> str:
+ return f'{self.__class__.__name__}.{self.name}'
+
+
+class AuditMode(enum.Enum):
+ """The audit requirements of a container."""
+ NONE = 'none'
+ REQUIRED = 'required'
+
+ def __repr__(self) -> str:
+ return f'{self.__class__.__name__}.{self.name}'
+
+
+@dataclasses.dataclass(frozen=True)
+class CompletionConfig(metaclass=abc.ABCMeta):
+ """Base class for completion configuration."""
+ name: str
+
+ @property
+ @abc.abstractmethod
+ def is_default(self) -> bool:
+ """True if the completion entry is only used for defaults, otherwise False."""
+
+
+@dataclasses.dataclass(frozen=True)
+class PosixCompletionConfig(CompletionConfig, metaclass=abc.ABCMeta):
+ """Base class for completion configuration of POSIX environments."""
+ @property
+ @abc.abstractmethod
+ def supported_pythons(self) -> list[str]:
+ """Return a list of the supported Python versions."""
+
+ @abc.abstractmethod
+ def get_python_path(self, version: str) -> str:
+ """Return the path of the requested Python version."""
+
+ def get_default_python(self, controller: bool) -> str:
+ """Return the default Python version for a controller or target as specified."""
+ context_pythons = CONTROLLER_PYTHON_VERSIONS if controller else SUPPORTED_PYTHON_VERSIONS
+ version = [python for python in self.supported_pythons if python in context_pythons][0]
+ return version
+
+ @property
+ def controller_supported(self) -> bool:
+ """True if at least one Python version is provided which supports the controller, otherwise False."""
+ return any(version in CONTROLLER_PYTHON_VERSIONS for version in self.supported_pythons)
+
+
+@dataclasses.dataclass(frozen=True)
+class PythonCompletionConfig(PosixCompletionConfig, metaclass=abc.ABCMeta):
+ """Base class for completion configuration of Python environments."""
+ python: str = ''
+ python_dir: str = '/usr/bin'
+
+ @property
+ def supported_pythons(self) -> list[str]:
+ """Return a list of the supported Python versions."""
+ versions = self.python.split(',') if self.python else []
+ versions = [version for version in versions if version in SUPPORTED_PYTHON_VERSIONS]
+ return versions
+
+ def get_python_path(self, version: str) -> str:
+ """Return the path of the requested Python version."""
+ return os.path.join(self.python_dir, f'python{version}')
+
+
+@dataclasses.dataclass(frozen=True)
+class RemoteCompletionConfig(CompletionConfig):
+ """Base class for completion configuration of remote environments provisioned through Ansible Core CI."""
+ provider: t.Optional[str] = None
+ arch: t.Optional[str] = None
+
+ @property
+ def platform(self) -> str:
+ """The name of the platform."""
+ return self.name.partition('/')[0]
+
+ @property
+ def version(self) -> str:
+ """The version of the platform."""
+ return self.name.partition('/')[2]
+
+ @property
+ def is_default(self) -> bool:
+ """True if the completion entry is only used for defaults, otherwise False."""
+ return not self.version
+
+ def __post_init__(self):
+ if not self.provider:
+ raise Exception(f'Remote completion entry "{self.name}" must provide a "provider" setting.')
+
+ if not self.arch:
+ raise Exception(f'Remote completion entry "{self.name}" must provide an "arch" setting.')
+
+
+@dataclasses.dataclass(frozen=True)
+class InventoryCompletionConfig(CompletionConfig):
+ """Configuration for inventory files."""
+ def __init__(self) -> None:
+ super().__init__(name='inventory')
+
+ @property
+ def is_default(self) -> bool:
+ """True if the completion entry is only used for defaults, otherwise False."""
+ return False
+
+
+@dataclasses.dataclass(frozen=True)
+class PosixSshCompletionConfig(PythonCompletionConfig):
+ """Configuration for a POSIX host reachable over SSH."""
+ def __init__(self, user: str, host: str) -> None:
+ super().__init__(
+ name=f'{user}@{host}',
+ python=','.join(SUPPORTED_PYTHON_VERSIONS),
+ )
+
+ @property
+ def is_default(self) -> bool:
+ """True if the completion entry is only used for defaults, otherwise False."""
+ return False
+
+
+@dataclasses.dataclass(frozen=True)
+class DockerCompletionConfig(PythonCompletionConfig):
+ """Configuration for Docker containers."""
+ image: str = ''
+ seccomp: str = 'default'
+ cgroup: str = CGroupVersion.V1_V2.value
+    audit: str = AuditMode.REQUIRED.value  # most containers need this, so the default is 'required'; containers which do not need it can opt out
+ placeholder: bool = False
+
+ @property
+ def is_default(self) -> bool:
+ """True if the completion entry is only used for defaults, otherwise False."""
+ return False
+
+ @property
+ def audit_enum(self) -> AuditMode:
+ """The audit requirements for the container. Raises an exception if the value is invalid."""
+ try:
+ return AuditMode(self.audit)
+ except ValueError:
+ raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.audit}" for the "audit" setting.') from None
+
+ @property
+ def cgroup_enum(self) -> CGroupVersion:
+ """The control group version(s) required by the container. Raises an exception if the value is invalid."""
+ try:
+ return CGroupVersion(self.cgroup)
+ except ValueError:
+ raise ValueError(f'Docker completion entry "{self.name}" has an invalid value "{self.cgroup}" for the "cgroup" setting.') from None
+
+ def __post_init__(self):
+ if not self.image:
+ raise Exception(f'Docker completion entry "{self.name}" must provide an "image" setting.')
+
+ if not self.supported_pythons and not self.placeholder:
+ raise Exception(f'Docker completion entry "{self.name}" must provide a "python" setting.')
+
+ # verify properties can be correctly parsed to enums
+ assert self.audit_enum
+ assert self.cgroup_enum
+
+
+@dataclasses.dataclass(frozen=True)
+class NetworkRemoteCompletionConfig(RemoteCompletionConfig):
+ """Configuration for remote network platforms."""
+ collection: str = ''
+ connection: str = ''
+ placeholder: bool = False
+
+ def __post_init__(self):
+ if not self.placeholder:
+ super().__post_init__()
+
+
+@dataclasses.dataclass(frozen=True)
+class PosixRemoteCompletionConfig(RemoteCompletionConfig, PythonCompletionConfig):
+ """Configuration for remote POSIX platforms."""
+ become: t.Optional[str] = None
+ placeholder: bool = False
+
+ def __post_init__(self):
+ if not self.placeholder:
+ super().__post_init__()
+
+ if self.become and self.become not in SUPPORTED_BECOME_METHODS:
+ raise Exception(f'POSIX remote completion entry "{self.name}" setting "become" must be omitted or one of: {", ".join(SUPPORTED_BECOME_METHODS)}')
+
+ if not self.supported_pythons:
+ if self.version and not self.placeholder:
+ raise Exception(f'POSIX remote completion entry "{self.name}" must provide a "python" setting.')
+ else:
+ if not self.version:
+ raise Exception(f'POSIX remote completion entry "{self.name}" is a platform default and cannot provide a "python" setting.')
+
+
+@dataclasses.dataclass(frozen=True)
+class WindowsRemoteCompletionConfig(RemoteCompletionConfig):
+ """Configuration for remote Windows platforms."""
+
+
+TCompletionConfig = t.TypeVar('TCompletionConfig', bound=CompletionConfig)
+
+
+def load_completion(name: str, completion_type: t.Type[TCompletionConfig]) -> dict[str, TCompletionConfig]:
+ """Load the named completion entries, returning them in dictionary form using the specified completion type."""
+ lines = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', '%s.txt' % name), remove_blank_lines=True)
+
+ if data_context().content.collection:
+ context = 'collection'
+ else:
+ context = 'ansible-core'
+
+ items = {name: data for name, data in [parse_completion_entry(line) for line in lines] if data.get('context', context) == context}
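+    # Entries may be limited to one context using context=collection or context=ansible-core; entries without a context apply to both.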
+
+ for item in items.values():
+ item.pop('context', None)
+ item.pop('placeholder', None)
+
+ completion = {name: completion_type(name=name, **data) for name, data in items.items()}
+
+ return completion
+
+
+def parse_completion_entry(value: str) -> tuple[str, dict[str, str]]:
+ """Parse the given completion entry, returning the entry name and a dictionary of key/value settings."""
+ values = value.split()
+
+ name = values[0]
+ data = {kvp[0]: kvp[1] if len(kvp) > 1 else '' for kvp in [item.split('=', 1) for item in values[1:]]}
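+    # Illustration with a hypothetical entry: 'ubuntu/22.04 python=3.10 arch=x86_64' parses to
+    # ('ubuntu/22.04', {'python': '3.10', 'arch': 'x86_64'}); a key given without '=' maps to ''.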
+
+ return name, data
+
+
+def filter_completion(
+ completion: dict[str, TCompletionConfig],
+ controller_only: bool = False,
+ include_defaults: bool = False,
+) -> dict[str, TCompletionConfig]:
+ """Return the given completion dictionary, filtering out configs which do not support the controller if controller_only is specified."""
+ if controller_only:
+ # The cast is needed because mypy gets confused here and forgets that completion values are TCompletionConfig.
+ completion = {name: t.cast(TCompletionConfig, config) for name, config in completion.items() if
+ isinstance(config, PosixCompletionConfig) and config.controller_supported}
+
+ if not include_defaults:
+ completion = {name: config for name, config in completion.items() if not config.is_default}
+
+ return completion
+
+
+@cache
+def docker_completion() -> dict[str, DockerCompletionConfig]:
+ """Return docker completion entries."""
+ return load_completion('docker', DockerCompletionConfig)
+
+
+@cache
+def remote_completion() -> dict[str, PosixRemoteCompletionConfig]:
+ """Return remote completion entries."""
+ return load_completion('remote', PosixRemoteCompletionConfig)
+
+
+@cache
+def windows_completion() -> dict[str, WindowsRemoteCompletionConfig]:
+ """Return windows completion entries."""
+ return load_completion('windows', WindowsRemoteCompletionConfig)
+
+
+@cache
+def network_completion() -> dict[str, NetworkRemoteCompletionConfig]:
+ """Return network completion entries."""
+ return load_completion('network', NetworkRemoteCompletionConfig)
diff --git a/test/lib/ansible_test/_internal/config.py b/test/lib/ansible_test/_internal/config.py
new file mode 100644
index 0000000..372c23a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/config.py
@@ -0,0 +1,353 @@
+"""Configuration classes."""
+from __future__ import annotations
+
+import dataclasses
+import enum
+import os
+import sys
+import typing as t
+
+from .util import (
+ display,
+ verify_sys_executable,
+ version_to_str,
+ type_guard,
+)
+
+from .util_common import (
+ CommonConfig,
+)
+
+from .metadata import (
+ Metadata,
+)
+
+from .data import (
+ data_context,
+)
+
+from .host_configs import (
+ ControllerConfig,
+ ControllerHostConfig,
+ HostConfig,
+ HostSettings,
+ OriginConfig,
+ PythonConfig,
+ VirtualPythonConfig,
+)
+
+THostConfig = t.TypeVar('THostConfig', bound=HostConfig)
+
+
+class TerminateMode(enum.Enum):
+ """When to terminate instances."""
+ ALWAYS = enum.auto()
+ NEVER = enum.auto()
+ SUCCESS = enum.auto()
+
+    def __str__(self) -> str:
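+        # e.g. str(TerminateMode.ALWAYS) == 'always'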
+ return self.name.lower()
+
+
+@dataclasses.dataclass(frozen=True)
+class ModulesConfig:
+ """Configuration for modules."""
+ python_requires: str
+ python_versions: tuple[str, ...]
+ controller_only: bool
+
+
+@dataclasses.dataclass(frozen=True)
+class ContentConfig:
+ """Configuration for all content."""
+ modules: ModulesConfig
+ python_versions: tuple[str, ...]
+ py2_support: bool
+
+
+class EnvironmentConfig(CommonConfig):
+ """Configuration common to all commands which execute in an environment."""
+ def __init__(self, args: t.Any, command: str) -> None:
+ super().__init__(args, command)
+
+ self.host_settings: HostSettings = args.host_settings
+ self.host_path: t.Optional[str] = args.host_path
+ self.containers: t.Optional[str] = args.containers
+ self.pypi_proxy: bool = args.pypi_proxy
+ self.pypi_endpoint: t.Optional[str] = args.pypi_endpoint
+
+ # Populated by content_config.get_content_config on the origin.
+ # Serialized and passed to delegated instances to avoid parsing a second time.
+ self.content_config: t.Optional[ContentConfig] = None
+
+ # Set by check_controller_python once HostState has been created by prepare_profiles.
+ # This is here for convenience, to avoid needing to pass HostState to some functions which already have access to EnvironmentConfig.
+ self.controller_python: t.Optional[PythonConfig] = None
+ """
+ The Python interpreter used by the controller.
+ Only available after delegation has been performed or skipped (if delegation is not required).
+ """
+
+ if self.host_path:
+ self.delegate = False
+ else:
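+            # Delegation is required when the controller is not the origin, a virtual environment was requested,
+            # or the current interpreter does not match the requested controller Python.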
+ self.delegate = (
+ not isinstance(self.controller, OriginConfig)
+ or isinstance(self.controller.python, VirtualPythonConfig)
+ or self.controller.python.version != version_to_str(sys.version_info[:2])
+ or bool(verify_sys_executable(self.controller.python.path))
+ )
+
+ self.docker_network: t.Optional[str] = args.docker_network
+ self.docker_terminate: t.Optional[TerminateMode] = args.docker_terminate
+
+ self.remote_endpoint: t.Optional[str] = args.remote_endpoint
+ self.remote_stage: t.Optional[str] = args.remote_stage
+ self.remote_terminate: t.Optional[TerminateMode] = args.remote_terminate
+
+ self.prime_containers: bool = args.prime_containers
+
+ self.requirements: bool = args.requirements
+
+ self.delegate_args: list[str] = []
+
+ self.dev_systemd_debug: bool = args.dev_systemd_debug
+ self.dev_probe_cgroups: t.Optional[str] = args.dev_probe_cgroups
+
+ def host_callback(files: list[tuple[str, str]]) -> None:
+ """Add the host files to the payload file list."""
+ config = self
+
+ if config.host_path:
+ settings_path = os.path.join(config.host_path, 'settings.dat')
+ state_path = os.path.join(config.host_path, 'state.dat')
+ config_path = os.path.join(config.host_path, 'config.dat')
+
+ files.append((os.path.abspath(settings_path), settings_path))
+ files.append((os.path.abspath(state_path), state_path))
+ files.append((os.path.abspath(config_path), config_path))
+
+ data_context().register_payload_callback(host_callback)
+
+ if args.docker_no_pull:
+ display.warning('The --docker-no-pull option is deprecated and has no effect. It will be removed in a future version of ansible-test.')
+
+ if args.no_pip_check:
+ display.warning('The --no-pip-check option is deprecated and has no effect. It will be removed in a future version of ansible-test.')
+
+ @property
+ def controller(self) -> ControllerHostConfig:
+ """Host configuration for the controller."""
+ return self.host_settings.controller
+
+ @property
+ def targets(self) -> list[HostConfig]:
+ """Host configuration for the targets."""
+ return self.host_settings.targets
+
+ def only_target(self, target_type: t.Type[THostConfig]) -> THostConfig:
+ """
+ Return the host configuration for the target.
+ Requires that there is exactly one target of the specified type.
+ """
+ targets = list(self.targets)
+
+ if len(targets) != 1:
+ raise Exception('There must be exactly one target.')
+
+ target = targets.pop()
+
+ if not isinstance(target, target_type):
+            raise Exception(f'Target is {type(target)} instead of {target_type}.')
+
+ return target
+
+ def only_targets(self, target_type: t.Type[THostConfig]) -> list[THostConfig]:
+ """
+ Return a list of target host configurations.
+        Requires that there are one or more targets, all of the specified type.
+ """
+ if not self.targets:
+ raise Exception('There must be one or more targets.')
+
+ assert type_guard(self.targets, target_type)
+
+ return t.cast(list[THostConfig], self.targets)
+
+ @property
+ def target_type(self) -> t.Type[HostConfig]:
+ """
+ The true type of the target(s).
+ If the target is the controller, the controller type is returned.
+ Requires at least one target, and all targets must be of the same type.
+ """
+ target_types = set(type(target) for target in self.targets)
+
+ if len(target_types) != 1:
+ raise Exception('There must be one or more targets, all of the same type.')
+
+ target_type = target_types.pop()
+
+ if issubclass(target_type, ControllerConfig):
+ target_type = type(self.controller)
+
+ return target_type
+
+
+class TestConfig(EnvironmentConfig):
+ """Configuration common to all test commands."""
+ def __init__(self, args: t.Any, command: str) -> None:
+ super().__init__(args, command)
+
+ self.coverage: bool = args.coverage
+ self.coverage_check: bool = args.coverage_check
+ self.include: list[str] = args.include or []
+ self.exclude: list[str] = args.exclude or []
+ self.require: list[str] = args.require or []
+
+ self.changed: bool = args.changed
+ self.tracked: bool = args.tracked
+ self.untracked: bool = args.untracked
+ self.committed: bool = args.committed
+ self.staged: bool = args.staged
+ self.unstaged: bool = args.unstaged
+ self.changed_from: str = args.changed_from
+ self.changed_path: list[str] = args.changed_path
+ self.base_branch: str = args.base_branch
+
+ self.lint: bool = getattr(args, 'lint', False)
+ self.junit: bool = getattr(args, 'junit', False)
+ self.failure_ok: bool = getattr(args, 'failure_ok', False)
+
+ self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata()
+ self.metadata_path: t.Optional[str] = None
+
+ if self.coverage_check:
+ self.coverage = True
+
+ def metadata_callback(files: list[tuple[str, str]]) -> None:
+ """Add the metadata file to the payload file list."""
+ config = self
+
+ if config.metadata_path:
+ files.append((os.path.abspath(config.metadata_path), config.metadata_path))
+
+ data_context().register_payload_callback(metadata_callback)
+
+
+class ShellConfig(EnvironmentConfig):
+ """Configuration for the shell command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args, 'shell')
+
+ self.cmd: list[str] = args.cmd
+ self.raw: bool = args.raw
+ self.check_layout = self.delegate # allow shell to be used without a valid layout as long as no delegation is required
+ self.interactive = sys.stdin.isatty() and not args.cmd # delegation should only be interactive when stdin is a TTY and no command was given
+ self.export: t.Optional[str] = args.export
+ self.display_stderr = True
+
+
+class SanityConfig(TestConfig):
+ """Configuration for the sanity command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args, 'sanity')
+
+ self.test: list[str] = args.test
+ self.skip_test: list[str] = args.skip_test
+ self.list_tests: bool = args.list_tests
+ self.allow_disabled: bool = args.allow_disabled
+ self.enable_optional_errors: bool = args.enable_optional_errors
+ self.keep_git: bool = args.keep_git
+ self.prime_venvs: bool = args.prime_venvs
+
+ self.display_stderr = self.lint or self.list_tests
+
+ if self.keep_git:
+ def git_callback(files: list[tuple[str, str]]) -> None:
+ """Add files from the content root .git directory to the payload file list."""
+ for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):
+ paths = [os.path.join(dirpath, filename) for filename in filenames]
+ files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)
+
+ data_context().register_payload_callback(git_callback)
+
+
+class IntegrationConfig(TestConfig):
+ """Configuration for the integration command."""
+ def __init__(self, args: t.Any, command: str) -> None:
+ super().__init__(args, command)
+
+ self.start_at: str = args.start_at
+ self.start_at_task: str = args.start_at_task
+ self.allow_destructive: bool = args.allow_destructive
+ self.allow_root: bool = args.allow_root
+ self.allow_disabled: bool = args.allow_disabled
+ self.allow_unstable: bool = args.allow_unstable
+ self.allow_unstable_changed: bool = args.allow_unstable_changed
+ self.allow_unsupported: bool = args.allow_unsupported
+ self.retry_on_error: bool = args.retry_on_error
+ self.continue_on_error: bool = args.continue_on_error
+ self.debug_strategy: bool = args.debug_strategy
+ self.changed_all_target: str = args.changed_all_target
+ self.changed_all_mode: str = args.changed_all_mode
+ self.list_targets: bool = args.list_targets
+ self.tags = args.tags
+ self.skip_tags = args.skip_tags
+ self.diff = args.diff
+ self.no_temp_workdir: bool = args.no_temp_workdir
+ self.no_temp_unicode: bool = args.no_temp_unicode
+
+ if self.list_targets:
+ self.explain = True
+ self.display_stderr = True
+
+ def get_ansible_config(self) -> str:
+ """Return the path to the Ansible config for the given config."""
+ ansible_config_relative_path = os.path.join(data_context().content.integration_path, '%s.cfg' % self.command)
+ ansible_config_path = os.path.join(data_context().content.root, ansible_config_relative_path)
+
+ if not os.path.exists(ansible_config_path):
+ # use the default empty configuration unless one has been provided
+ ansible_config_path = super().get_ansible_config()
+
+ return ansible_config_path
+
+
+TIntegrationConfig = t.TypeVar('TIntegrationConfig', bound=IntegrationConfig)
+
+
+class PosixIntegrationConfig(IntegrationConfig):
+ """Configuration for the posix integration command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args, 'integration')
+
+
+class WindowsIntegrationConfig(IntegrationConfig):
+ """Configuration for the windows integration command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args, 'windows-integration')
+
+
+class NetworkIntegrationConfig(IntegrationConfig):
+ """Configuration for the network integration command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args, 'network-integration')
+
+ self.testcase: str = args.testcase
+
+
+class UnitsConfig(TestConfig):
+ """Configuration for the units command."""
+ def __init__(self, args: t.Any) -> None:
+ super().__init__(args, 'units')
+
+ self.collect_only: bool = args.collect_only
+ self.num_workers: int = args.num_workers
+
+ self.requirements_mode: str = getattr(args, 'requirements_mode', '')
+
+ if self.requirements_mode == 'only':
+ self.requirements = True
+ elif self.requirements_mode == 'skip':
+ self.requirements = False
diff --git a/test/lib/ansible_test/_internal/connections.py b/test/lib/ansible_test/_internal/connections.py
new file mode 100644
index 0000000..4823b1a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/connections.py
@@ -0,0 +1,258 @@
+"""Connection abstraction for interacting with test hosts."""
+from __future__ import annotations
+
+import abc
+import shlex
+import tempfile
+import typing as t
+
+from .io import (
+ read_text_file,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .util import (
+ Display,
+ OutputStream,
+ SubprocessError,
+ retry,
+)
+
+from .util_common import (
+ run_command,
+)
+
+from .docker_util import (
+ DockerInspect,
+ docker_exec,
+ docker_inspect,
+ docker_network_disconnect,
+)
+
+from .ssh import (
+ SshConnectionDetail,
+ ssh_options_to_list,
+)
+
+from .become import (
+ Become,
+)
+
+
+class Connection(metaclass=abc.ABCMeta):
+ """Base class for connecting to a host."""
+ @abc.abstractmethod
+ def run(self,
+ command: list[str],
+ capture: bool,
+ interactive: bool = False,
+ data: t.Optional[str] = None,
+ stdin: t.Optional[t.IO[bytes]] = None,
+ stdout: t.Optional[t.IO[bytes]] = None,
+ output_stream: t.Optional[OutputStream] = None,
+ ) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run the specified command and return the result."""
+
+ def extract_archive(self,
+ chdir: str,
+ src: t.IO[bytes],
+ ):
+ """Extract the given archive file stream in the specified directory."""
+ tar_cmd = ['tar', 'oxzf', '-', '-C', chdir]
+
+ retry(lambda: self.run(tar_cmd, stdin=src, capture=True))
+
+ def create_archive(self,
+ chdir: str,
+ name: str,
+ dst: t.IO[bytes],
+ exclude: t.Optional[str] = None,
+ ):
+ """Create the specified archive file stream from the specified directory, including the given name and optionally excluding the given name."""
+ tar_cmd = ['tar', 'cf', '-', '-C', chdir]
+ gzip_cmd = ['gzip']
+
+ if exclude:
+ tar_cmd += ['--exclude', exclude]
+
+ tar_cmd.append(name)
+
+ # Using gzip to compress the archive allows this to work on all POSIX systems we support.
+ commands = [tar_cmd, gzip_cmd]
+
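+        # e.g. with no exclude, the resulting command is: sh -c 'tar cf - -C <chdir> <name> | gzip'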
+ sh_cmd = ['sh', '-c', ' | '.join(shlex.join(command) for command in commands)]
+
+ retry(lambda: self.run(sh_cmd, stdout=dst, capture=True))
+
+
+class LocalConnection(Connection):
+ """Connect to localhost."""
+ def __init__(self, args: EnvironmentConfig) -> None:
+ self.args = args
+
+ def run(self,
+ command: list[str],
+ capture: bool,
+ interactive: bool = False,
+ data: t.Optional[str] = None,
+ stdin: t.Optional[t.IO[bytes]] = None,
+ stdout: t.Optional[t.IO[bytes]] = None,
+ output_stream: t.Optional[OutputStream] = None,
+ ) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run the specified command and return the result."""
+ return run_command(
+ args=self.args,
+ cmd=command,
+ capture=capture,
+ data=data,
+ stdin=stdin,
+ stdout=stdout,
+ interactive=interactive,
+ output_stream=output_stream,
+ )
+
+
+class SshConnection(Connection):
+ """Connect to a host using SSH."""
+ def __init__(self, args: EnvironmentConfig, settings: SshConnectionDetail, become: t.Optional[Become] = None) -> None:
+ self.args = args
+ self.settings = settings
+ self.become = become
+
+ self.options = ['-i', settings.identity_file]
+
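+        # These defaults disable interactive prompts and host key checking, and abandon an
+        # unresponsive server after about 60 seconds (4 keepalives at 15 second intervals).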
+ ssh_options: dict[str, t.Union[int, str]] = dict(
+ BatchMode='yes',
+ StrictHostKeyChecking='no',
+ UserKnownHostsFile='/dev/null',
+ ServerAliveInterval=15,
+ ServerAliveCountMax=4,
+ )
+
+ ssh_options.update(settings.options)
+
+ self.options.extend(ssh_options_to_list(ssh_options))
+
+ def run(self,
+ command: list[str],
+ capture: bool,
+ interactive: bool = False,
+ data: t.Optional[str] = None,
+ stdin: t.Optional[t.IO[bytes]] = None,
+ stdout: t.Optional[t.IO[bytes]] = None,
+ output_stream: t.Optional[OutputStream] = None,
+ ) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run the specified command and return the result."""
+ options = list(self.options)
+
+ if self.become:
+ command = self.become.prepare_command(command)
+
+ options.append('-q')
+
+ if interactive:
+ options.append('-tt')
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-test-ssh-debug-', suffix='.log') as ssh_logfile:
+ options.extend(['-vvv', '-E', ssh_logfile.name])
+
+ if self.settings.port:
+ options.extend(['-p', str(self.settings.port)])
+
+ options.append(f'{self.settings.user}@{self.settings.host}')
+ options.append(shlex.join(command))
+
+ def error_callback(ex: SubprocessError) -> None:
+ """Error handler."""
+ self.capture_log_details(ssh_logfile.name, ex)
+
+ return run_command(
+ args=self.args,
+ cmd=['ssh'] + options,
+ capture=capture,
+ data=data,
+ stdin=stdin,
+ stdout=stdout,
+ interactive=interactive,
+ output_stream=output_stream,
+ error_callback=error_callback,
+ )
+
+ @staticmethod
+ def capture_log_details(path: str, ex: SubprocessError) -> None:
+ """Read the specified SSH debug log and add relevant details to the provided exception."""
+ if ex.status != 255:
+ return
+
+ markers = [
+ 'debug1: Connection Established',
+ 'debug1: Authentication successful',
+ 'debug1: Entering interactive session',
+ 'debug1: Sending command',
+ 'debug2: PTY allocation request accepted',
+ 'debug2: exec request accepted',
+ ]
+
+ file_contents = read_text_file(path)
+ messages = []
+
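+        # Scan the log backwards, keeping lines until the most recent connection-progress marker is reached.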
+ for line in reversed(file_contents.splitlines()):
+ messages.append(line)
+
+ if any(line.startswith(marker) for marker in markers):
+ break
+
+ message = '\n'.join(reversed(messages))
+
+ ex.message += '>>> SSH Debug Output\n'
+ ex.message += '%s%s\n' % (message.strip(), Display.clear)
+
+
+class DockerConnection(Connection):
+ """Connect to a host using Docker."""
+ def __init__(self, args: EnvironmentConfig, container_id: str, user: t.Optional[str] = None) -> None:
+ self.args = args
+ self.container_id = container_id
+ self.user: t.Optional[str] = user
+
+ def run(self,
+ command: list[str],
+ capture: bool,
+ interactive: bool = False,
+ data: t.Optional[str] = None,
+ stdin: t.Optional[t.IO[bytes]] = None,
+ stdout: t.Optional[t.IO[bytes]] = None,
+ output_stream: t.Optional[OutputStream] = None,
+ ) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run the specified command and return the result."""
+ options = []
+
+ if self.user:
+ options.extend(['--user', self.user])
+
+ if interactive:
+ options.append('-it')
+
+ return docker_exec(
+ args=self.args,
+ container_id=self.container_id,
+ cmd=command,
+ options=options,
+ capture=capture,
+ data=data,
+ stdin=stdin,
+ stdout=stdout,
+ interactive=interactive,
+ output_stream=output_stream,
+ )
+
+ def inspect(self) -> DockerInspect:
+ """Inspect the container and return a DockerInspect instance with the results."""
+ return docker_inspect(self.args, self.container_id)
+
+ def disconnect_network(self, network: str) -> None:
+ """Disconnect the container from the specified network."""
+ docker_network_disconnect(self.args, self.container_id, network)
diff --git a/test/lib/ansible_test/_internal/constants.py b/test/lib/ansible_test/_internal/constants.py
new file mode 100644
index 0000000..b6072fb
--- /dev/null
+++ b/test/lib/ansible_test/_internal/constants.py
@@ -0,0 +1,48 @@
+"""Constants used by ansible-test. Imports should not be used in this file (other than to import the target common constants)."""
+from __future__ import annotations
+
+from .._util.target.common.constants import (
+ CONTROLLER_PYTHON_VERSIONS,
+ REMOTE_ONLY_PYTHON_VERSIONS,
+)
+
+STATUS_HOST_CONNECTION_ERROR = 4
+
+# Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 2.x when close_fds=True.
+# This will affect all Python subprocesses. It will also affect the current Python process if set before subprocess is imported for the first time.
+SOFT_RLIMIT_NOFILE = 1024
+
+# File used to track the ansible-test test execution timeout.
+TIMEOUT_PATH = '.ansible-test-timeout.json'
+
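+# Taking index 0 as the minimum assumes CONTROLLER_PYTHON_VERSIONS is ordered from oldest to newest.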
+CONTROLLER_MIN_PYTHON_VERSION = CONTROLLER_PYTHON_VERSIONS[0]
+SUPPORTED_PYTHON_VERSIONS = REMOTE_ONLY_PYTHON_VERSIONS + CONTROLLER_PYTHON_VERSIONS
+
+REMOTE_PROVIDERS = [
+ 'default',
+ 'aws',
+ 'azure',
+ 'parallels',
+]
+
+SECCOMP_CHOICES = [
+ 'default',
+ 'unconfined',
+]
+
+# This bin symlink map must exactly match the contents of the bin directory.
+# It is necessary for payload creation to reconstruct the bin directory when running ansible-test from an installed version of ansible.
+# It is also used to construct the injector directory at runtime.
+ANSIBLE_BIN_SYMLINK_MAP = {
+ 'ansible': '../lib/ansible/cli/adhoc.py',
+ 'ansible-config': '../lib/ansible/cli/config.py',
+ 'ansible-connection': '../lib/ansible/cli/scripts/ansible_connection_cli_stub.py',
+ 'ansible-console': '../lib/ansible/cli/console.py',
+ 'ansible-doc': '../lib/ansible/cli/doc.py',
+ 'ansible-galaxy': '../lib/ansible/cli/galaxy.py',
+ 'ansible-inventory': '../lib/ansible/cli/inventory.py',
+ 'ansible-playbook': '../lib/ansible/cli/playbook.py',
+ 'ansible-pull': '../lib/ansible/cli/pull.py',
+ 'ansible-test': '../test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py',
+ 'ansible-vault': '../lib/ansible/cli/vault.py',
+}
diff --git a/test/lib/ansible_test/_internal/containers.py b/test/lib/ansible_test/_internal/containers.py
new file mode 100644
index 0000000..a581ecf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/containers.py
@@ -0,0 +1,974 @@
+"""High level functions for working with containers."""
+from __future__ import annotations
+
+import atexit
+import collections.abc as c
+import contextlib
+import enum
+import json
+import random
+import time
+import uuid
+import threading
+import typing as t
+
+from .util import (
+ ApplicationError,
+ SubprocessError,
+ display,
+ sanitize_host_name,
+)
+
+from .util_common import (
+ named_temporary_file,
+)
+
+from .config import (
+ EnvironmentConfig,
+ IntegrationConfig,
+ SanityConfig,
+ ShellConfig,
+ UnitsConfig,
+ WindowsIntegrationConfig,
+)
+
+from .docker_util import (
+ ContainerNotFoundError,
+ DockerInspect,
+ docker_create,
+ docker_exec,
+ docker_inspect,
+ docker_network_inspect,
+ docker_pull,
+ docker_rm,
+ docker_run,
+ docker_start,
+ get_docker_container_id,
+ get_docker_host_ip,
+ get_podman_host_ip,
+ require_docker,
+ detect_host_properties,
+)
+
+from .ansible_util import (
+ run_playbook,
+)
+
+from .core_ci import (
+ SshKey,
+)
+
+from .target import (
+ IntegrationTarget,
+)
+
+from .ssh import (
+ SshConnectionDetail,
+ SshProcess,
+ create_ssh_port_forwards,
+ create_ssh_port_redirects,
+ generate_ssh_inventory,
+)
+
+from .host_configs import (
+ ControllerConfig,
+ DockerConfig,
+ OriginConfig,
+ PosixSshConfig,
+ PythonConfig,
+ RemoteConfig,
+ WindowsInventoryConfig,
+)
+
+from .connections import (
+ SshConnection,
+)
+
+from .thread import (
+ mutex,
+)
+
+# information about support containers provisioned by the current ansible-test instance
+support_containers: dict[str, ContainerDescriptor] = {}
+support_containers_mutex = threading.Lock()
+
+
+class HostType:
+ """Enum representing the types of hosts involved in running tests."""
+ origin = 'origin'
+ control = 'control'
+ managed = 'managed'
+
+
+class CleanupMode(enum.Enum):
+ """How container cleanup should be handled."""
+ YES = enum.auto()
+ NO = enum.auto()
+ INFO = enum.auto()
+
+
+def run_support_container(
+ args: EnvironmentConfig,
+ context: str,
+ image: str,
+ name: str,
+ ports: list[int],
+ aliases: t.Optional[list[str]] = None,
+ start: bool = True,
+ allow_existing: bool = False,
+ cleanup: t.Optional[CleanupMode] = None,
+ cmd: t.Optional[list[str]] = None,
+ env: t.Optional[dict[str, str]] = None,
+ options: t.Optional[list[str]] = None,
+ publish_ports: bool = True,
+) -> t.Optional[ContainerDescriptor]:
+ """
+ Start a container used to support tests, but not run them.
+ Containers created this way will be accessible from tests.
+ """
+ if args.prime_containers:
+ docker_pull(args, image)
+ return None
+
+ # SSH is required for publishing ports, as well as modifying the hosts file.
+ # Initializing the SSH key here makes sure it is available for use after delegation.
+ SshKey(args)
+
+ aliases = aliases or [sanitize_host_name(name)]
+
+ docker_command = require_docker().command
+ current_container_id = get_docker_container_id()
+
+ if docker_command == 'docker':
+ if isinstance(args.controller, DockerConfig) and all(isinstance(target, (ControllerConfig, DockerConfig)) for target in args.targets):
+ publish_ports = False # publishing ports is not needed when test hosts are on the docker network
+
+ if current_container_id:
+ publish_ports = False # publishing ports is pointless if already running in a docker container
+
+    options = list(options or [])  # copy to avoid mutating a list provided by the caller
+
+ if start:
+ options.append('-dt') # the -t option is required to cause systemd in the container to log output to the console
+
+ if publish_ports:
+ for port in ports:
+ options.extend(['-p', str(port)])
+
+ if env:
+ for key, value in env.items():
+ options.extend(['--env', '%s=%s' % (key, value)])
+
+ max_open_files = detect_host_properties(args).max_open_files
+
+ options.extend(['--ulimit', 'nofile=%s' % max_open_files])
+
+ support_container_id = None
+
+ if allow_existing:
+ try:
+ container = docker_inspect(args, name)
+ except ContainerNotFoundError:
+ container = None
+
+ if container:
+ support_container_id = container.id
+
+ if not container.running:
+ display.info('Ignoring existing "%s" container which is not running.' % name, verbosity=1)
+ support_container_id = None
+ elif not container.image:
+ display.info('Ignoring existing "%s" container which has the wrong image.' % name, verbosity=1)
+ support_container_id = None
+ elif publish_ports and not all(port and len(port) == 1 for port in [container.get_tcp_port(port) for port in ports]):
+ display.info('Ignoring existing "%s" container which does not have the required published ports.' % name, verbosity=1)
+ support_container_id = None
+
+ if not support_container_id:
+ docker_rm(args, name)
+
+ if args.dev_systemd_debug:
+ options.extend(('--env', 'SYSTEMD_LOG_LEVEL=debug'))
+
+ if support_container_id:
+ display.info('Using existing "%s" container.' % name)
+ running = True
+ existing = True
+ else:
+ display.info('Starting new "%s" container.' % name)
+ docker_pull(args, image)
+ support_container_id = run_container(args, image, name, options, create_only=not start, cmd=cmd)
+ running = start
+ existing = False
+
+ if cleanup is None:
+ cleanup = CleanupMode.INFO if existing else CleanupMode.YES
+
+ descriptor = ContainerDescriptor(
+ image,
+ context,
+ name,
+ support_container_id,
+ ports,
+ aliases,
+ publish_ports,
+ running,
+ existing,
+ cleanup,
+ env,
+ )
+
+ with support_containers_mutex:
+ if name in support_containers:
+ raise Exception(f'Container already defined: {name}')
+
+ if not support_containers:
+ atexit.register(cleanup_containers, args)
+
+ support_containers[name] = descriptor
+
+ display.info(f'Adding "{name}" to container database.')
+
+ if start:
+ descriptor.register(args)
+
+ return descriptor
+
+
+def run_container(
+ args: EnvironmentConfig,
+ image: str,
+ name: str,
+ options: t.Optional[list[str]],
+ cmd: t.Optional[list[str]] = None,
+ create_only: bool = False,
+) -> str:
+ """Run a container using the given docker image."""
+ options = list(options or [])
+ cmd = list(cmd or [])
+
+ options.extend(['--name', name])
+
+ network = get_docker_preferred_network_name(args)
+
+ if is_docker_user_defined_network(network):
+ # Only when the network is not the default bridge network.
+ options.extend(['--network', network])
+
+ for _iteration in range(1, 3):
+ try:
+ if create_only:
+ stdout = docker_create(args, image, options, cmd)[0]
+ else:
+ stdout = docker_run(args, image, options, cmd)[0]
+ except SubprocessError as ex:
+ display.error(ex.message)
+ display.warning(f'Failed to run docker image "{image}". Waiting a few seconds before trying again.')
+ docker_rm(args, name) # podman doesn't remove containers after create if run fails
+ time.sleep(3)
+ else:
+ if args.explain:
+ stdout = ''.join(random.choice('0123456789abcdef') for _iteration in range(64))
+
+ return stdout.strip()
+
+ raise ApplicationError(f'Failed to run docker image "{image}".')
+
+
+def start_container(args: EnvironmentConfig, container_id: str) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Start a docker container by name or ID."""
+ options: list[str] = []
+
+ for _iteration in range(1, 3):
+ try:
+ return docker_start(args, container_id, options)
+ except SubprocessError as ex:
+ display.error(ex.message)
+ display.warning(f'Failed to start docker container "{container_id}". Waiting a few seconds before trying again.')
+ time.sleep(3)
+
+ raise ApplicationError(f'Failed to start docker container "{container_id}".')
+
+
+def get_container_ip_address(args: EnvironmentConfig, container: DockerInspect) -> t.Optional[str]:
+ """Return the IP address of the container for the preferred docker network."""
+ if container.networks:
+ network_name = get_docker_preferred_network_name(args)
+
+ if not network_name:
+ # Sort networks and use the first available.
+ # This assumes all containers will have access to the same networks.
+ network_name = sorted(container.networks.keys()).pop(0)
+
+ ipaddress = container.networks[network_name]['IPAddress']
+ else:
+ ipaddress = container.network_settings['IPAddress']
+
+ if not ipaddress:
+ return None
+
+ return ipaddress
+
+
+@mutex
+def get_docker_preferred_network_name(args: EnvironmentConfig) -> t.Optional[str]:
+ """
+ Return the preferred network name for use with Docker. The selection logic is:
+ - the network selected by the user with `--docker-network`
+ - the network of the currently running docker container (if any)
+ - the default docker network (returns None)
+ """
+ try:
+ return get_docker_preferred_network_name.network # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+
+ network = None
+
+ if args.docker_network:
+ network = args.docker_network
+ else:
+ current_container_id = get_docker_container_id()
+
+ if current_container_id:
+ # Make sure any additional containers we launch use the same network as the current container we're running in.
+ # This is needed when ansible-test is running in a container that is not connected to Docker's default network.
+ container = docker_inspect(args, current_container_id, always=True)
+ network = container.get_network_name()
+
+ # The default docker behavior puts containers on the same network.
+ # The default podman behavior puts containers on isolated networks which don't allow communication between containers or network disconnect.
+ # Starting with podman version 2.1.0 rootless containers are able to join networks.
+ # Starting with podman version 2.2.0 containers can be disconnected from networks.
+ # To maintain feature parity with docker, detect and use the default "podman" network when running under podman.
+ if network is None and require_docker().command == 'podman' and docker_network_inspect(args, 'podman', always=True):
+ network = 'podman'
+
+ get_docker_preferred_network_name.network = network # type: ignore[attr-defined]
+
+ return network
+
+
+def is_docker_user_defined_network(network: str) -> bool:
+ """Return True if the network being used is a user-defined network."""
+ return bool(network) and network != 'bridge'
+
+
+@mutex
+def get_container_database(args: EnvironmentConfig) -> ContainerDatabase:
+ """Return the current container database, creating it as needed, or returning the one provided on the command line through delegation."""
+ try:
+ return get_container_database.database # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+
+ if args.containers:
+ display.info('Parsing container database.', verbosity=1)
+ database = ContainerDatabase.from_dict(json.loads(args.containers))
+ else:
+ display.info('Creating container database.', verbosity=1)
+ database = create_container_database(args)
+
+ display.info('>>> Container Database\n%s' % json.dumps(database.to_dict(), indent=4, sort_keys=True), verbosity=3)
+
+ get_container_database.database = database # type: ignore[attr-defined]
+
+ return database
+
+
+class ContainerAccess:
+ """Information needed for one test host to access a single container supporting tests."""
+ def __init__(self, host_ip: str, names: list[str], ports: t.Optional[list[int]], forwards: t.Optional[dict[int, int]]) -> None:
+ # if forwards is set
+ # this is where forwards are sent (it is the host that provides an indirect connection to the containers on alternate ports)
+ # /etc/hosts uses 127.0.0.1 (since port redirection will be used)
+ # else
+ # this is what goes into /etc/hosts (it is the container's direct IP)
+ self.host_ip = host_ip
+
+ # primary name + any aliases -- these go into the hosts file and reference the appropriate ip for the origin/control/managed host
+ self.names = names
+
+ # ports available (set if forwards is not set)
+ self.ports = ports
+
+ # port redirections to create through host_ip -- if not set, no port redirections will be used
+ self.forwards = forwards
+
+ def port_map(self) -> list[tuple[int, int]]:
+ """Return a port map for accessing this container."""
+ if self.forwards:
+ ports = list(self.forwards.items())
+ else:
+ ports = [(port, port) for port in self.ports]
+
+ return ports
+
+ @staticmethod
+ def from_dict(data: dict[str, t.Any]) -> ContainerAccess:
+ """Return a ContainerAccess instance from the given dict."""
+ forwards = data.get('forwards')
+
+ if forwards:
+ forwards = dict((int(key), value) for key, value in forwards.items())
+
+ return ContainerAccess(
+ host_ip=data['host_ip'],
+ names=data['names'],
+ ports=data.get('ports'),
+ forwards=forwards,
+ )
+
+ def to_dict(self) -> dict[str, t.Any]:
+ """Return a dict of the current instance."""
+ value: dict[str, t.Any] = dict(
+ host_ip=self.host_ip,
+ names=self.names,
+ )
+
+ if self.ports:
+ value.update(ports=self.ports)
+
+ if self.forwards:
+ value.update(forwards=self.forwards)
+
+ return value
+
+
+class ContainerDatabase:
+ """Database of running containers used to support tests."""
+ def __init__(self, data: dict[str, dict[str, dict[str, ContainerAccess]]]) -> None:
+ self.data = data
+
+ @staticmethod
+ def from_dict(data: dict[str, t.Any]) -> ContainerDatabase:
+ """Return a ContainerDatabase instance from the given dict."""
+ return ContainerDatabase(dict((access_name,
+ dict((context_name,
+ dict((container_name, ContainerAccess.from_dict(container))
+ for container_name, container in containers.items()))
+ for context_name, containers in contexts.items()))
+ for access_name, contexts in data.items()))
+
+ def to_dict(self) -> dict[str, t.Any]:
+ """Return a dict of the current instance."""
+ return dict((access_name,
+ dict((context_name,
+ dict((container_name, container.to_dict())
+ for container_name, container in containers.items()))
+ for context_name, containers in contexts.items()))
+ for access_name, contexts in self.data.items())
+
+
+def local_ssh(args: EnvironmentConfig, python: PythonConfig) -> SshConnectionDetail:
+ """Return SSH connection details for localhost, connecting as root to the default SSH port."""
+ return SshConnectionDetail('localhost', 'localhost', None, 'root', SshKey(args).key, python.path)
+
+
+def root_ssh(ssh: SshConnection) -> SshConnectionDetail:
+ """Return the SSH connection details from the given SSH connection. If become was specified, the user will be changed to `root`."""
+ settings = ssh.settings.__dict__.copy()
+
+ if ssh.become:
+ settings.update(
+ user='root',
+ )
+
+ return SshConnectionDetail(**settings)
+
+
+def create_container_database(args: EnvironmentConfig) -> ContainerDatabase:
+ """Create and return a container database with information necessary for all test hosts to make use of relevant support containers."""
+ origin: dict[str, dict[str, ContainerAccess]] = {}
+ control: dict[str, dict[str, ContainerAccess]] = {}
+ managed: dict[str, dict[str, ContainerAccess]] = {}
+
+ for name, container in support_containers.items():
+ if container.details.published_ports:
+ if require_docker().command == 'podman':
+ host_ip_func = get_podman_host_ip
+ else:
+ host_ip_func = get_docker_host_ip
+ published_access = ContainerAccess(
+ host_ip=host_ip_func(),
+ names=container.aliases,
+ ports=None,
+ forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()),
+ )
+ else:
+ published_access = None # no published access without published ports (ports are only published if needed)
+
+ if container.details.container_ip:
+            # Docker containers and rootful podman containers should have a container IP address.
+ container_access = ContainerAccess(
+ host_ip=container.details.container_ip,
+ names=container.aliases,
+ ports=container.ports,
+ forwards=None,
+ )
+ elif require_docker().command == 'podman':
+ # published ports for rootless podman containers should be accessible from the host's IP
+ container_access = ContainerAccess(
+ host_ip=get_podman_host_ip(),
+ names=container.aliases,
+ ports=None,
+ forwards=dict((port, published_port) for port, published_port in container.details.published_ports.items()),
+ )
+ else:
+ container_access = None # no container access without an IP address
+
+ if get_docker_container_id():
+ if not container_access:
+ raise Exception('Missing IP address for container: %s' % name)
+
+ origin_context = origin.setdefault(container.context, {})
+ origin_context[name] = container_access
+ elif not published_access:
+ pass # origin does not have network access to the containers
+ else:
+ origin_context = origin.setdefault(container.context, {})
+ origin_context[name] = published_access
+
+ if isinstance(args.controller, RemoteConfig):
+ pass # SSH forwarding required
+ elif '-controller-' in name:
+ pass # hack to avoid exposing the controller container to the controller
+ elif isinstance(args.controller, DockerConfig) or (isinstance(args.controller, OriginConfig) and get_docker_container_id()):
+ if container_access:
+ control_context = control.setdefault(container.context, {})
+ control_context[name] = container_access
+ else:
+ raise Exception('Missing IP address for container: %s' % name)
+ else:
+ if not published_access:
+ raise Exception('Missing published ports for container: %s' % name)
+
+ control_context = control.setdefault(container.context, {})
+ control_context[name] = published_access
+
+ if issubclass(args.target_type, (RemoteConfig, WindowsInventoryConfig, PosixSshConfig)):
+ pass # SSH forwarding required
+ elif '-controller-' in name or '-target-' in name:
+ pass # hack to avoid exposing the controller and target containers to the target
+ elif issubclass(args.target_type, DockerConfig) or (issubclass(args.target_type, OriginConfig) and get_docker_container_id()):
+ if container_access:
+ managed_context = managed.setdefault(container.context, {})
+ managed_context[name] = container_access
+ else:
+ raise Exception('Missing IP address for container: %s' % name)
+ else:
+ if not published_access:
+ raise Exception('Missing published ports for container: %s' % name)
+
+ managed_context = managed.setdefault(container.context, {})
+ managed_context[name] = published_access
+
+ data = {
+ HostType.origin: origin,
+ HostType.control: control,
+ HostType.managed: managed,
+ }
+
+ data = dict((key, value) for key, value in data.items() if value)
+
+ return ContainerDatabase(data)
+
+
+class SupportContainerContext:
+ """Context object for tracking information relating to access of support containers."""
+ def __init__(self, containers: ContainerDatabase, process: t.Optional[SshProcess]) -> None:
+ self.containers = containers
+ self.process = process
+
+ def close(self) -> None:
+ """Close the process maintaining the port forwards."""
+ if not self.process:
+ return # forwarding not in use
+
+ self.process.terminate()
+
+ display.info('Waiting for the session SSH port forwarding process to terminate.', verbosity=1)
+
+ self.process.wait()
+
+
+@contextlib.contextmanager
+def support_container_context(
+ args: EnvironmentConfig,
+ ssh: t.Optional[SshConnectionDetail],
+) -> c.Iterator[t.Optional[ContainerDatabase]]:
+ """Create a context manager for integration tests that use support containers."""
+ if not isinstance(args, (IntegrationConfig, UnitsConfig, SanityConfig, ShellConfig)):
+ yield None # containers are only needed for commands that have targets (hosts or pythons)
+ return
+
+ containers = get_container_database(args)
+
+ if not containers.data:
+ yield ContainerDatabase({}) # no containers are being used, return an empty database
+ return
+
+ context = create_support_container_context(args, ssh, containers)
+
+ try:
+ yield context.containers
+ finally:
+ context.close()
+
+
+def create_support_container_context(
+ args: EnvironmentConfig,
+ ssh: t.Optional[SshConnectionDetail],
+ containers: ContainerDatabase,
+) -> SupportContainerContext:
+ """Context manager that provides SSH port forwards. Returns updated container metadata."""
+ host_type = HostType.control
+
+ revised = ContainerDatabase(containers.data.copy())
+ source = revised.data.pop(HostType.origin, None)
+
+ container_map: dict[tuple[str, int], tuple[str, str, int]] = {}
+
+ if host_type not in revised.data:
+ if not source:
+ raise Exception('Missing origin container details.')
+
+ for context_name, context in source.items():
+ for container_name, container in context.items():
+ if '-controller-' in container_name:
+ continue # hack to avoid exposing the controller container to the controller
+
+ for port, access_port in container.port_map():
+ container_map[(container.host_ip, access_port)] = (context_name, container_name, port)
+
+ if not container_map:
+ return SupportContainerContext(revised, None)
+
+ if not ssh:
+ raise Exception('The %s host was not pre-configured for container access and SSH forwarding is not available.' % host_type)
+
+ forwards = list(container_map.keys())
+ process = create_ssh_port_forwards(args, ssh, forwards)
+ result = SupportContainerContext(revised, process)
+
+ try:
+ port_forwards = process.collect_port_forwards()
+ contexts: dict[str, dict[str, ContainerAccess]] = {}
+
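+        # Rewrite each container's access details so the host reaches it through the local end of an SSH forward.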
+ for forward, forwarded_port in port_forwards.items():
+ access_host, access_port = forward
+ context_name, container_name, container_port = container_map[(access_host, access_port)]
+ container = source[context_name][container_name]
+ context = contexts.setdefault(context_name, {})
+
+ forwarded_container = context.setdefault(container_name, ContainerAccess('127.0.0.1', container.names, None, {}))
+ forwarded_container.forwards[container_port] = forwarded_port
+
+ display.info('Container "%s" port %d available at %s:%d is forwarded over SSH as port %d.' % (
+ container_name, container_port, access_host, access_port, forwarded_port,
+ ), verbosity=1)
+
+ revised.data[host_type] = contexts
+
+ return result
+ except Exception:
+ result.close()
+ raise
+
+
+class ContainerDescriptor:
+ """Information about a support container."""
+ def __init__(self,
+ image: str,
+ context: str,
+ name: str,
+ container_id: str,
+ ports: list[int],
+ aliases: list[str],
+ publish_ports: bool,
+ running: bool,
+ existing: bool,
+ cleanup: CleanupMode,
+ env: t.Optional[dict[str, str]],
+ ) -> None:
+ self.image = image
+ self.context = context
+ self.name = name
+ self.container_id = container_id
+ self.ports = ports
+ self.aliases = aliases
+ self.publish_ports = publish_ports
+ self.running = running
+ self.existing = existing
+ self.cleanup = cleanup
+ self.env = env
+ self.details: t.Optional[SupportContainer] = None
+
+ def start(self, args: EnvironmentConfig) -> None:
+ """Start the container. Used for containers which are created, but not started."""
+ start_container(args, self.name)
+
+ self.register(args)
+
+ def register(self, args: EnvironmentConfig) -> SupportContainer:
+ """Record the container's runtime details. Must be used after the container has been started."""
+ if self.details:
+ raise Exception('Container already registered: %s' % self.name)
+
+ try:
+ container = docker_inspect(args, self.name)
+ except ContainerNotFoundError:
+ if not args.explain:
+ raise
+
+ # provide enough mock data to keep --explain working
+ container = DockerInspect(args, dict(
+ Id=self.container_id,
+ NetworkSettings=dict(
+ IPAddress='127.0.0.1',
+ Ports=dict(('%d/tcp' % port, [dict(HostPort=random.randint(30000, 40000) if self.publish_ports else port)]) for port in self.ports),
+ ),
+ Config=dict(
+ Env=['%s=%s' % (key, value) for key, value in self.env.items()] if self.env else [],
+ ),
+ ))
+
+ support_container_ip = get_container_ip_address(args, container)
+
+ if self.publish_ports:
+ # inspect the support container to locate the published ports
+ tcp_ports = dict((port, container.get_tcp_port(port)) for port in self.ports)
+
+ if any(not config or len(set(conf['HostPort'] for conf in config)) != 1 for config in tcp_ports.values()):
+ raise ApplicationError('Unexpected `docker inspect` results for published TCP ports:\n%s' % json.dumps(tcp_ports, indent=4, sort_keys=True))
+
+ published_ports = dict((port, int(config[0]['HostPort'])) for port, config in tcp_ports.items())
+ else:
+ published_ports = {}
+
+ self.details = SupportContainer(
+ container,
+ support_container_ip,
+ published_ports,
+ )
+
+ return self.details
+
+
+class SupportContainer:
+ """Information about a running support container available for use by tests."""
+ def __init__(self,
+ container: DockerInspect,
+ container_ip: str,
+ published_ports: dict[int, int],
+ ) -> None:
+ self.container = container
+ self.container_ip = container_ip
+ self.published_ports = published_ports
+
+
+def wait_for_file(args: EnvironmentConfig,
+ container_name: str,
+ path: str,
+ sleep: int,
+ tries: int,
+ check: t.Optional[c.Callable[[str], bool]] = None,
+ ) -> str:
+ """Wait for the specified file to become available in the requested container and return its contents."""
+ display.info('Waiting for container "%s" to provide file: %s' % (container_name, path))
+
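+    # Poll for the file, sleeping between attempts after the first; range(1, tries) performs tries - 1 attempts.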
+ for _iteration in range(1, tries):
+ if _iteration > 1:
+ time.sleep(sleep)
+
+ try:
+ stdout = docker_exec(args, container_name, ['dd', 'if=%s' % path], capture=True)[0]
+ except SubprocessError:
+ continue
+
+ if not check or check(stdout):
+ return stdout
+
+ raise ApplicationError('Timeout waiting for container "%s" to provide file: %s' % (container_name, path))
+
+
+def cleanup_containers(args: EnvironmentConfig) -> None:
+ """Clean up containers."""
+ for container in support_containers.values():
+ if container.cleanup == CleanupMode.YES:
+ docker_rm(args, container.container_id)
+ elif container.cleanup == CleanupMode.INFO:
+ display.notice(f'Remember to run `{require_docker().command} rm -f {container.name}` when finished testing.')
+
+
+def create_hosts_entries(context: dict[str, ContainerAccess]) -> list[str]:
+ """Return hosts entries for the specified context."""
+ entries = []
+ unique_id = uuid.uuid4()
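+    # The shared UUID tags every entry written for this run, so the entries can be identified later (e.g. for removal).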
+
+ for container in context.values():
+ # forwards require port redirection through localhost
+ if container.forwards:
+ host_ip = '127.0.0.1'
+ else:
+ host_ip = container.host_ip
+
+ entries.append('%s %s # ansible-test %s' % (host_ip, ' '.join(container.names), unique_id))
+
+ return entries
+
+
+def create_container_hooks(
+ args: IntegrationConfig,
+ control_connections: list[SshConnectionDetail],
+ managed_connections: t.Optional[list[SshConnectionDetail]],
+) -> tuple[t.Optional[c.Callable[[IntegrationTarget], None]], t.Optional[c.Callable[[IntegrationTarget], None]]]:
+ """Return pre and post target callbacks for enabling and disabling container access for each test target."""
+ containers = get_container_database(args)
+
+ control_contexts = containers.data.get(HostType.control)
+
+ if control_contexts:
+ managed_contexts = containers.data.get(HostType.managed)
+
+ if not managed_contexts:
+ managed_contexts = create_managed_contexts(control_contexts)
+
+ control_type = 'posix'
+
+ if isinstance(args, WindowsIntegrationConfig):
+ managed_type = 'windows'
+ else:
+ managed_type = 'posix'
+
+ control_state: dict[str, tuple[list[str], list[SshProcess]]] = {}
+ managed_state: dict[str, tuple[list[str], list[SshProcess]]] = {}
+
+ def pre_target(target: IntegrationTarget) -> None:
+ """Configure hosts for SSH port forwarding required by the specified target."""
+ forward_ssh_ports(args, control_connections, '%s_hosts_prepare.yml' % control_type, control_state, target, HostType.control, control_contexts)
+ forward_ssh_ports(args, managed_connections, '%s_hosts_prepare.yml' % managed_type, managed_state, target, HostType.managed, managed_contexts)
+
+ def post_target(target: IntegrationTarget) -> None:
+ """Clean up previously configured SSH port forwarding which was required by the specified target."""
+ cleanup_ssh_ports(args, control_connections, '%s_hosts_restore.yml' % control_type, control_state, target, HostType.control)
+ cleanup_ssh_ports(args, managed_connections, '%s_hosts_restore.yml' % managed_type, managed_state, target, HostType.managed)
+ else:
+ pre_target, post_target = None, None
+
+ return pre_target, post_target
+
+
+def create_managed_contexts(control_contexts: dict[str, dict[str, ContainerAccess]]) -> dict[str, dict[str, ContainerAccess]]:
+ """Create managed contexts from the given control contexts."""
+ managed_contexts: dict[str, dict[str, ContainerAccess]] = {}
+
+ for context_name, control_context in control_contexts.items():
+ managed_context = managed_contexts[context_name] = {}
+
+ for container_name, control_container in control_context.items():
+ managed_context[container_name] = ContainerAccess(control_container.host_ip, control_container.names, None, dict(control_container.port_map()))
+
+ return managed_contexts
+
+
+def forward_ssh_ports(
+ args: IntegrationConfig,
+ ssh_connections: t.Optional[list[SshConnectionDetail]],
+ playbook: str,
+ target_state: dict[str, tuple[list[str], list[SshProcess]]],
+ target: IntegrationTarget,
+ host_type: str,
+ contexts: dict[str, dict[str, ContainerAccess]],
+) -> None:
+ """Configure port forwarding using SSH and write hosts file entries."""
+ if ssh_connections is None:
+ return
+
+ test_context = None
+
+ for context_name, context in contexts.items():
+ context_alias = 'cloud/%s/' % context_name
+
+ if context_alias in target.aliases:
+ test_context = context
+ break
+
+ if not test_context:
+ return
+
+ if not ssh_connections:
+ if args.explain:
+ return
+
+ raise Exception('The %s host was not pre-configured for container access and SSH forwarding is not available.' % host_type)
+
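+    # Each redirect is a (container_port, host_ip, access_port) tuple: container_port on the remote host is forwarded to host_ip:access_port.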
+ redirects: list[tuple[int, str, int]] = []
+ messages = []
+
+ for container_name, container in test_context.items():
+ explain = []
+
+ for container_port, access_port in container.port_map():
+ if container.forwards:
+ redirects.append((container_port, container.host_ip, access_port))
+
+ explain.append('%d -> %s:%d' % (container_port, container.host_ip, access_port))
+ else:
+ explain.append('%s:%d' % (container.host_ip, container_port))
+
+ if explain:
+ if container.forwards:
+ message = 'Port forwards for the "%s" container have been established on the %s host' % (container_name, host_type)
+ else:
+ message = 'Ports for the "%s" container are available on the %s host as' % (container_name, host_type)
+
+ messages.append('%s:\n%s' % (message, '\n'.join(explain)))
+
+ hosts_entries = create_hosts_entries(test_context)
+ inventory = generate_ssh_inventory(ssh_connections)
+
+ with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: # type: str
+ run_playbook(args, inventory_path, playbook, capture=False, variables=dict(hosts_entries=hosts_entries))
+
+ ssh_processes: list[SshProcess] = []
+
+ if redirects:
+ for ssh in ssh_connections:
+ ssh_processes.append(create_ssh_port_redirects(args, ssh, redirects))
+
+ target_state[target.name] = (hosts_entries, ssh_processes)
+
+ for message in messages:
+ display.info(message, verbosity=1)
+
+
+def cleanup_ssh_ports(
+ args: IntegrationConfig,
+ ssh_connections: list[SshConnectionDetail],
+ playbook: str,
+ target_state: dict[str, tuple[list[str], list[SshProcess]]],
+ target: IntegrationTarget,
+ host_type: str,
+) -> None:
+ """Stop previously configured SSH port forwarding and remove previously written hosts file entries."""
+ state = target_state.pop(target.name, None)
+
+ if not state:
+ return
+
+ (hosts_entries, ssh_processes) = state
+
+ inventory = generate_ssh_inventory(ssh_connections)
+
+ with named_temporary_file(args, 'ssh-inventory-', '.json', None, inventory) as inventory_path: # type: str
+ run_playbook(args, inventory_path, playbook, capture=False, variables=dict(hosts_entries=hosts_entries))
+
+ if ssh_processes:
+ for process in ssh_processes:
+ process.terminate()
+
+ display.info('Waiting for the %s host SSH port forwarding process(es) to terminate.' % host_type, verbosity=1)
+
+ for process in ssh_processes:
+ process.wait()
diff --git a/test/lib/ansible_test/_internal/content_config.py b/test/lib/ansible_test/_internal/content_config.py
new file mode 100644
index 0000000..7ac1876
--- /dev/null
+++ b/test/lib/ansible_test/_internal/content_config.py
@@ -0,0 +1,179 @@
+"""Content configuration."""
+from __future__ import annotations
+
+import os
+import pickle
+import typing as t
+
+from .constants import (
+ CONTROLLER_PYTHON_VERSIONS,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from .compat.packaging import (
+ PACKAGING_IMPORT_ERROR,
+ SpecifierSet,
+ Version,
+)
+
+from .compat.yaml import (
+ YAML_IMPORT_ERROR,
+ yaml_load,
+)
+
+from .io import (
+ open_binary_file,
+ read_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ display,
+ str_to_version,
+)
+
+from .data import (
+ data_context,
+)
+
+from .config import (
+ EnvironmentConfig,
+ ContentConfig,
+ ModulesConfig,
+)
+
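+# Sentinel used to distinguish a missing dictionary key from one explicitly set to a false-y value.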
+MISSING = object()
+
+
+def parse_modules_config(data: t.Any) -> ModulesConfig:
+ """Parse the given dictionary as module config and return it."""
+ if not isinstance(data, dict):
+        raise Exception('config must be of type `dict`, not `%s`' % type(data))
+
+ python_requires = data.get('python_requires', MISSING)
+
+    if python_requires is MISSING:
+ raise KeyError('python_requires is required')
+
+ return ModulesConfig(
+ python_requires=python_requires,
+ python_versions=parse_python_requires(python_requires),
+ controller_only=python_requires == 'controller',
+ )
+
+
+def parse_content_config(data: t.Any) -> ContentConfig:
+ """Parse the given dictionary as content config and return it."""
+ if not isinstance(data, dict):
+        raise Exception('config must be of type `dict`, not `%s`' % type(data))
+
+ # Configuration specific to modules/module_utils.
+ modules = parse_modules_config(data.get('modules', {}))
+
+ # Python versions supported by the controller, combined with Python versions supported by modules/module_utils.
+ # Mainly used for display purposes and to limit the Python versions used for sanity tests.
+ python_versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS
+ if version in CONTROLLER_PYTHON_VERSIONS or version in modules.python_versions)
+
+ # True if Python 2.x is supported.
+ py2_support = any(version for version in python_versions if str_to_version(version)[0] == 2)
+
+ return ContentConfig(
+ modules=modules,
+ python_versions=python_versions,
+ py2_support=py2_support,
+ )
+
+
+def load_config(path: str) -> t.Optional[ContentConfig]:
+ """Load and parse the specified config file and return the result or None if loading/parsing failed."""
+ if YAML_IMPORT_ERROR:
+ raise ApplicationError('The "PyYAML" module is required to parse config: %s' % YAML_IMPORT_ERROR)
+
+ if PACKAGING_IMPORT_ERROR:
+ raise ApplicationError('The "packaging" module is required to parse config: %s' % PACKAGING_IMPORT_ERROR)
+
+ value = read_text_file(path)
+
+ try:
+ yaml_value = yaml_load(value)
+ except Exception as ex: # pylint: disable=broad-except
+ display.warning('Ignoring config "%s" due to a YAML parsing error: %s' % (path, ex))
+ return None
+
+ try:
+ config = parse_content_config(yaml_value)
+ except Exception as ex: # pylint: disable=broad-except
+ display.warning('Ignoring config "%s" due a config parsing error: %s' % (path, ex))
+ return None
+
+ display.info('Loaded configuration: %s' % path, verbosity=1)
+
+ return config
+
+
+def get_content_config(args: EnvironmentConfig) -> ContentConfig:
+ """
+ Parse and return the content configuration (if any) for the current collection.
+ For ansible-core, a default configuration is used.
+ Results are cached.
+ """
+ if args.host_path:
+ args.content_config = deserialize_content_config(os.path.join(args.host_path, 'config.dat'))
+
+ if args.content_config:
+ return args.content_config
+
+ collection_config_path = 'tests/config.yml'
+
+ config = None
+
+ if data_context().content.collection and os.path.exists(collection_config_path):
+ config = load_config(collection_config_path)
+
+ if not config:
+ config = parse_content_config(dict(
+ modules=dict(
+ python_requires='default',
+ ),
+ ))
+
+ if not config.modules.python_versions:
+ raise ApplicationError('This collection does not declare support for modules/module_utils on any known Python version.\n'
+ 'Ansible supports modules/module_utils on Python versions: %s\n'
+ 'This collection provides the Python requirement: %s' % (
+ ', '.join(SUPPORTED_PYTHON_VERSIONS), config.modules.python_requires))
+
+ args.content_config = config
+
+ return config
+
+
+def parse_python_requires(value: t.Any) -> tuple[str, ...]:
+ """Parse the given 'python_requires' version specifier and return the matching Python versions."""
+ if not isinstance(value, str):
+        raise ValueError('python_requires must be of type `str`, not `%s`' % type(value))
+
+ versions: tuple[str, ...]
+
+ if value == 'default':
+ versions = SUPPORTED_PYTHON_VERSIONS
+ elif value == 'controller':
+ versions = CONTROLLER_PYTHON_VERSIONS
+ else:
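+        # Any other value is treated as a PEP 440 version specifier, e.g. '>=2.7,!=3.5.*'.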
+ specifier_set = SpecifierSet(value)
+ versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS if specifier_set.contains(Version(version)))
+
+ return versions
+
+
+def serialize_content_config(args: EnvironmentConfig, path: str) -> None:
+ """Serialize the content config to the given path. If the config has not been loaded, an empty config will be serialized."""
+ with open_binary_file(path, 'wb') as config_file:
+ pickle.dump(args.content_config, config_file)
+
+
+def deserialize_content_config(path: str) -> ContentConfig:
+ """Deserialize content config from the path."""
+ with open_binary_file(path) as config_file:
+ return pickle.load(config_file)
diff --git a/test/lib/ansible_test/_internal/core_ci.py b/test/lib/ansible_test/_internal/core_ci.py
new file mode 100644
index 0000000..d62b903
--- /dev/null
+++ b/test/lib/ansible_test/_internal/core_ci.py
@@ -0,0 +1,547 @@
+"""Access Ansible Core CI remote services."""
+from __future__ import annotations
+
+import abc
+import dataclasses
+import json
+import os
+import re
+import traceback
+import uuid
+import time
+import typing as t
+
+from .http import (
+ HttpClient,
+ HttpResponse,
+ HttpError,
+)
+
+from .io import (
+ make_dirs,
+ read_text_file,
+ write_json_file,
+ write_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ display,
+ ANSIBLE_TEST_TARGET_ROOT,
+ mutex,
+)
+
+from .util_common import (
+ run_command,
+ ResultType,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+from .data import (
+ data_context,
+)
+
+
+@dataclasses.dataclass(frozen=True)
+class Resource(metaclass=abc.ABCMeta):
+ """Base class for Ansible Core CI resources."""
+ @abc.abstractmethod
+ def as_tuple(self) -> tuple[str, str, str, str]:
+ """Return the resource as a tuple of platform, version, architecture and provider."""
+
+ @abc.abstractmethod
+ def get_label(self) -> str:
+ """Return a user-friendly label for this resource."""
+
+ @property
+ @abc.abstractmethod
+ def persist(self) -> bool:
+ """True if the resource is persistent, otherwise false."""
+
+
+@dataclasses.dataclass(frozen=True)
+class VmResource(Resource):
+ """Details needed to request a VM from Ansible Core CI."""
+ platform: str
+ version: str
+ architecture: str
+ provider: str
+ tag: str
+
+ def as_tuple(self) -> tuple[str, str, str, str]:
+ """Return the resource as a tuple of platform, version, architecture and provider."""
+ return self.platform, self.version, self.architecture, self.provider
+
+ def get_label(self) -> str:
+ """Return a user-friendly label for this resource."""
+ return f'{self.platform} {self.version} ({self.architecture}) [{self.tag}] @{self.provider}'
+
+ @property
+ def persist(self) -> bool:
+ """True if the resource is persistent, otherwise false."""
+ return True
+
+
+@dataclasses.dataclass(frozen=True)
+class CloudResource(Resource):
+ """Details needed to request cloud credentials from Ansible Core CI."""
+ platform: str
+
+ def as_tuple(self) -> tuple[str, str, str, str]:
+ """Return the resource as a tuple of platform, version, architecture and provider."""
+ return self.platform, '', '', self.platform
+
+ def get_label(self) -> str:
+ """Return a user-friendly label for this resource."""
+ return self.platform
+
+ @property
+ def persist(self) -> bool:
+ """True if the resource is persistent, otherwise false."""
+ return False
+
+
+class AnsibleCoreCI:
+ """Client for Ansible Core CI services."""
+ DEFAULT_ENDPOINT = 'https://ansible-core-ci.testing.ansible.com'
+
+ def __init__(
+ self,
+ args: EnvironmentConfig,
+ resource: Resource,
+ load: bool = True,
+ ) -> None:
+ self.args = args
+ self.resource = resource
+ self.platform, self.version, self.arch, self.provider = self.resource.as_tuple()
+ self.stage = args.remote_stage
+ self.client = HttpClient(args)
+ self.connection = None
+ self.instance_id = None
+ self.endpoint = None
+ self.default_endpoint = args.remote_endpoint or self.DEFAULT_ENDPOINT
+ self.retries = 3
+ self.ci_provider = get_ci_provider()
+ self.label = self.resource.get_label()
+
+ stripped_label = re.sub('[^A-Za-z0-9_.]+', '-', self.label).strip('-')
+
+ self.name = f"{stripped_label}-{self.stage}" # turn the label into something suitable for use as a filename
+
+ self.path = os.path.expanduser(f'~/.ansible/test/instances/{self.name}')
+ self.ssh_key = SshKey(args)
+
+ if self.resource.persist and load and self._load():
+ try:
+ display.info(f'Checking existing {self.label} instance using: {self._uri}', verbosity=1)
+
+ self.connection = self.get(always_raise_on=[404])
+
+ display.info(f'Loaded existing {self.label} instance.', verbosity=1)
+ except HttpError as ex:
+ if ex.status != 404:
+ raise
+
+ self._clear()
+
+ display.info(f'Cleared stale {self.label} instance.', verbosity=1)
+
+ self.instance_id = None
+ self.endpoint = None
+ elif not self.resource.persist:
+ self.instance_id = None
+ self.endpoint = None
+ self._clear()
+
+ if self.instance_id:
+ self.started: bool = True
+ else:
+ self.started = False
+ self.instance_id = str(uuid.uuid4())
+ self.endpoint = None
+
+ display.sensitive.add(self.instance_id)
+
+ if not self.endpoint:
+ self.endpoint = self.default_endpoint
+
+ @property
+ def available(self) -> bool:
+ """Return True if Ansible Core CI is supported."""
+ return self.ci_provider.supports_core_ci_auth()
+
+ def start(self) -> t.Optional[dict[str, t.Any]]:
+ """Start instance."""
+ if self.started:
+ display.info(f'Skipping started {self.label} instance.', verbosity=1)
+ return None
+
+ return self._start(self.ci_provider.prepare_core_ci_auth())
+
+ def stop(self) -> None:
+ """Stop instance."""
+ if not self.started:
+ display.info(f'Skipping invalid {self.label} instance.', verbosity=1)
+ return
+
+ response = self.client.delete(self._uri)
+
+ if response.status_code == 404:
+ self._clear()
+ display.info(f'Cleared invalid {self.label} instance.', verbosity=1)
+ return
+
+ if response.status_code == 200:
+ self._clear()
+ display.info(f'Stopped running {self.label} instance.', verbosity=1)
+ return
+
+ raise self._create_http_error(response)
+
+ def get(self, tries: int = 3, sleep: int = 15, always_raise_on: t.Optional[list[int]] = None) -> t.Optional[InstanceConnection]:
+ """Get instance connection information."""
+ if not self.started:
+ display.info(f'Skipping invalid {self.label} instance.', verbosity=1)
+ return None
+
+ if not always_raise_on:
+ always_raise_on = []
+
+ if self.connection and self.connection.running:
+ return self.connection
+
+ while True:
+ tries -= 1
+ response = self.client.get(self._uri)
+
+ if response.status_code == 200:
+ break
+
+ error = self._create_http_error(response)
+
+ if not tries or response.status_code in always_raise_on:
+ raise error
+
+ display.warning(f'{error}. Trying again after {sleep} seconds.')
+ time.sleep(sleep)
+
+ if self.args.explain:
+ self.connection = InstanceConnection(
+ running=True,
+ hostname='cloud.example.com',
+ port=12345,
+ username='root',
+ password='password' if self.platform == 'windows' else None,
+ )
+ else:
+ response_json = response.json()
+ status = response_json['status']
+ con = response_json.get('connection')
+
+ if con:
+ self.connection = InstanceConnection(
+ running=status == 'running',
+ hostname=con['hostname'],
+ port=int(con['port']),
+ username=con['username'],
+ password=con.get('password'),
+ response_json=response_json,
+ )
+ else:
+ self.connection = InstanceConnection(
+ running=status == 'running',
+ response_json=response_json,
+ )
+
+ if self.connection.password:
+ display.sensitive.add(str(self.connection.password))
+
+ status = 'running' if self.connection.running else 'starting'
+
+ display.info(f'The {self.label} instance is {status}.', verbosity=1)
+
+ return self.connection
+
+ def wait(self, iterations: t.Optional[int] = 90) -> None:
+ """Wait for the instance to become ready."""
+ for _iteration in range(1, iterations):
+ if self.get().running:
+ return
+ time.sleep(10)
+
+ raise ApplicationError(f'Timeout waiting for {self.label} instance.')
+
+ @property
+ def _uri(self) -> str:
+ return f'{self.endpoint}/{self.stage}/{self.provider}/{self.instance_id}'
+
+ def _start(self, auth) -> dict[str, t.Any]:
+ """Start instance."""
+ display.info(f'Initializing new {self.label} instance using: {self._uri}', verbosity=1)
+
+ if self.platform == 'windows':
+ winrm_config = read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'ConfigureRemotingForAnsible.ps1'))
+ else:
+ winrm_config = None
+
+ data = dict(
+ config=dict(
+ platform=self.platform,
+ version=self.version,
+ architecture=self.arch,
+ public_key=self.ssh_key.pub_contents,
+ winrm_config=winrm_config,
+ )
+ )
+
+ data.update(dict(auth=auth))
+
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ response = self._start_endpoint(data, headers)
+
+ self.started = True
+ self._save()
+
+ display.info(f'Started {self.label} instance.', verbosity=1)
+
+ if self.args.explain:
+ return {}
+
+ return response.json()
+
+ def _start_endpoint(self, data: dict[str, t.Any], headers: dict[str, str]) -> HttpResponse:
+ tries = self.retries
+ sleep = 15
+
+ while True:
+ tries -= 1
+ response = self.client.put(self._uri, data=json.dumps(data), headers=headers)
+
+ if response.status_code == 200:
+ return response
+
+ error = self._create_http_error(response)
+
+ if response.status_code == 503:
+ raise error
+
+ if not tries:
+ raise error
+
+ display.warning(f'{error}. Trying again after {sleep} seconds.')
+ time.sleep(sleep)
+
+ def _clear(self) -> None:
+ """Clear instance information."""
+ try:
+ self.connection = None
+ os.remove(self.path)
+ except FileNotFoundError:
+ pass
+
+ def _load(self) -> bool:
+ """Load instance information."""
+ try:
+ data = read_text_file(self.path)
+ except FileNotFoundError:
+ return False
+
+ if not data.startswith('{'):
+ return False # legacy format
+
+ config = json.loads(data)
+
+ return self.load(config)
+
+ def load(self, config: dict[str, str]) -> bool:
+ """Load the instance from the provided dictionary."""
+ self.instance_id = str(config['instance_id'])
+ self.endpoint = config['endpoint']
+ self.started = True
+
+ display.sensitive.add(self.instance_id)
+
+ return True
+
+ def _save(self) -> None:
+ """Save instance information."""
+ if self.args.explain:
+ return
+
+ config = self.save()
+
+ write_json_file(self.path, config, create_directories=True)
+
+ def save(self) -> dict[str, str]:
+ """Save instance details and return as a dictionary."""
+ return dict(
+ label=self.resource.get_label(),
+ instance_id=self.instance_id,
+ endpoint=self.endpoint,
+ )
+
+ @staticmethod
+ def _create_http_error(response: HttpResponse) -> ApplicationError:
+ """Return an exception created from the given HTTP response."""
+ response_json = response.json()
+ stack_trace = ''
+
+ if 'message' in response_json:
+ message = response_json['message']
+ elif 'errorMessage' in response_json:
+ message = response_json['errorMessage'].strip()
+ if 'stackTrace' in response_json:
+ traceback_lines = response_json['stackTrace']
+
+ # AWS Lambda on Python 2.7 returns a list of tuples
+ # AWS Lambda on Python 3.7 returns a list of strings
+ if traceback_lines and isinstance(traceback_lines[0], list):
+ traceback_lines = traceback.format_list(traceback_lines)
+
+ trace = '\n'.join([x.rstrip() for x in traceback_lines])
+ stack_trace = f'\nTraceback (from remote server):\n{trace}'
+ else:
+ message = str(response_json)
+
+ return CoreHttpError(response.status_code, message, stack_trace)
+
+
+class CoreHttpError(HttpError):
+ """HTTP response as an error."""
+ def __init__(self, status: int, remote_message: str, remote_stack_trace: str) -> None:
+ super().__init__(status, f'{remote_message}{remote_stack_trace}')
+
+ self.remote_message = remote_message
+ self.remote_stack_trace = remote_stack_trace
+
+
+class SshKey:
+ """Container for SSH key used to connect to remote instances."""
+ KEY_TYPE = 'rsa' # RSA is used to maintain compatibility with paramiko and EC2
+ KEY_NAME = f'id_{KEY_TYPE}'
+ PUB_NAME = f'{KEY_NAME}.pub'
+
+ @mutex
+ def __init__(self, args: EnvironmentConfig) -> None:
+ key_pair = self.get_key_pair()
+
+ if not key_pair:
+ key_pair = self.generate_key_pair(args)
+
+ key, pub = key_pair
+ key_dst, pub_dst = self.get_in_tree_key_pair_paths()
+
+ def ssh_key_callback(files: list[tuple[str, str]]) -> None:
+ """
+ Add the SSH keys to the payload file list.
+ They are either outside the source tree or in the cache dir which is ignored by default.
+ """
+ files.append((key, os.path.relpath(key_dst, data_context().content.root)))
+ files.append((pub, os.path.relpath(pub_dst, data_context().content.root)))
+
+ data_context().register_payload_callback(ssh_key_callback)
+
+ self.key, self.pub = key, pub
+
+ if args.explain:
+ self.pub_contents = None
+ self.key_contents = None
+ else:
+ self.pub_contents = read_text_file(self.pub).strip()
+ self.key_contents = read_text_file(self.key).strip()
+
+ @staticmethod
+ def get_relative_in_tree_private_key_path() -> str:
+ """Return the ansible-test SSH private key path relative to the content tree."""
+ temp_dir = ResultType.TMP.relative_path
+
+ key = os.path.join(temp_dir, SshKey.KEY_NAME)
+
+ return key
+
+ def get_in_tree_key_pair_paths(self) -> t.Optional[tuple[str, str]]:
+ """Return the ansible-test SSH key pair paths from the content tree."""
+ temp_dir = ResultType.TMP.path
+
+ key = os.path.join(temp_dir, self.KEY_NAME)
+ pub = os.path.join(temp_dir, self.PUB_NAME)
+
+ return key, pub
+
+ def get_source_key_pair_paths(self) -> t.Optional[tuple[str, str]]:
+ """Return the ansible-test SSH key pair paths for the current user."""
+ base_dir = os.path.expanduser('~/.ansible/test/')
+
+ key = os.path.join(base_dir, self.KEY_NAME)
+ pub = os.path.join(base_dir, self.PUB_NAME)
+
+ return key, pub
+
+ def get_key_pair(self) -> t.Optional[tuple[str, str]]:
+ """Return the ansible-test SSH key pair paths if present, otherwise return None."""
+ key, pub = self.get_in_tree_key_pair_paths()
+
+ if os.path.isfile(key) and os.path.isfile(pub):
+ return key, pub
+
+ key, pub = self.get_source_key_pair_paths()
+
+ if os.path.isfile(key) and os.path.isfile(pub):
+ return key, pub
+
+ return None
+
+ def generate_key_pair(self, args: EnvironmentConfig) -> tuple[str, str]:
+ """Generate an SSH key pair for use by all ansible-test invocations for the current user."""
+ key, pub = self.get_source_key_pair_paths()
+
+ if not args.explain:
+ make_dirs(os.path.dirname(key))
+
+ if not os.path.isfile(key) or not os.path.isfile(pub):
+ run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', self.KEY_TYPE, '-N', '', '-f', key], capture=True)
+
+ if args.explain:
+ return key, pub
+
+ # newer ssh-keygen PEM output (such as on RHEL 8.1) is not recognized by paramiko
+ key_contents = read_text_file(key)
+ key_contents = re.sub(r'(BEGIN|END) PRIVATE KEY', r'\1 RSA PRIVATE KEY', key_contents)
+
+ write_text_file(key, key_contents)
+
+ return key, pub
+
+
+class InstanceConnection:
+ """Container for remote instance status and connection details."""
+ def __init__(self,
+ running: bool,
+ hostname: t.Optional[str] = None,
+ port: t.Optional[int] = None,
+ username: t.Optional[str] = None,
+ password: t.Optional[str] = None,
+ response_json: t.Optional[dict[str, t.Any]] = None,
+ ) -> None:
+ self.running = running
+ self.hostname = hostname
+ self.port = port
+ self.username = username
+ self.password = password
+ self.response_json = response_json or {}
+
+ def __str__(self):
+ if self.password:
+ return f'{self.hostname}:{self.port} [{self.username}:{self.password}]'
+
+ return f'{self.hostname}:{self.port} [{self.username}]'
diff --git a/test/lib/ansible_test/_internal/coverage_util.py b/test/lib/ansible_test/_internal/coverage_util.py
new file mode 100644
index 0000000..0f44505
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage_util.py
@@ -0,0 +1,314 @@
+"""Utility code for facilitating collection of code coverage when running tests."""
+from __future__ import annotations
+
+import atexit
+import dataclasses
+import os
+import sqlite3
+import tempfile
+import typing as t
+
+from .config import (
+ IntegrationConfig,
+ SanityConfig,
+ TestConfig,
+)
+
+from .io import (
+ write_text_file,
+ make_dirs,
+ open_binary_file,
+)
+
+from .util import (
+ ApplicationError,
+ InternalError,
+ COVERAGE_CONFIG_NAME,
+ remove_tree,
+ sanitize_host_name,
+ str_to_version,
+)
+
+from .data import (
+ data_context,
+)
+
+from .util_common import (
+ intercept_python,
+ ResultType,
+)
+
+from .host_configs import (
+ DockerConfig,
+ HostConfig,
+ OriginConfig,
+ PosixRemoteConfig,
+ PosixSshConfig,
+ PythonConfig,
+)
+
+from .constants import (
+ SUPPORTED_PYTHON_VERSIONS,
+ CONTROLLER_PYTHON_VERSIONS,
+)
+
+from .thread import (
+ mutex,
+)
+
+
+@dataclasses.dataclass(frozen=True)
+class CoverageVersion:
+ """Details about a coverage version and its supported Python versions."""
+ coverage_version: str
+ schema_version: int
+ min_python: tuple[int, int]
+ max_python: tuple[int, int]
+
+
+COVERAGE_VERSIONS = (
+ # IMPORTANT: Keep this in sync with the ansible-test.txt requirements file.
+ CoverageVersion('6.5.0', 7, (3, 7), (3, 11)),
+ CoverageVersion('4.5.4', 0, (2, 6), (3, 6)),
+)
+"""
+This tuple specifies the coverage version to use for Python version ranges.
+"""
+
+CONTROLLER_COVERAGE_VERSION = COVERAGE_VERSIONS[0]
+"""The coverage version supported on the controller."""
+
+
+class CoverageError(ApplicationError):
+ """Exception caused while attempting to read a coverage file."""
+ def __init__(self, path: str, message: str) -> None:
+ self.path = path
+ self.message = message
+
+ super().__init__(f'Error reading coverage file "{os.path.relpath(path)}": {message}')
+
+
+def get_coverage_version(version: str) -> CoverageVersion:
+ """Return the coverage version to use with the specified Python version."""
+ python_version = str_to_version(version)
+ supported_versions = [entry for entry in COVERAGE_VERSIONS if entry.min_python <= python_version <= entry.max_python]
+
+ if not supported_versions:
+ raise InternalError(f'Python {version} has no matching entry in COVERAGE_VERSIONS.')
+
+ if len(supported_versions) > 1:
+ raise InternalError(f'Python {version} has multiple matching entries in COVERAGE_VERSIONS.')
+
+ coverage_version = supported_versions[0]
+
+ return coverage_version
+
+
+def get_coverage_file_schema_version(path: str) -> int:
+ """
+ Return the schema version from the specified coverage file.
+ SQLite based files report schema version 1 or later.
+ JSON based files are reported as schema version 0.
+ An exception is raised if the file is not recognized or the schema version cannot be determined.
+ """
+ with open_binary_file(path) as file_obj:
+ header = file_obj.read(16)
+
+ if header.startswith(b'!coverage.py: '):
+ return 0
+
+ if header.startswith(b'SQLite'):
+ return get_sqlite_schema_version(path)
+
+ raise CoverageError(path, f'Unknown header: {header!r}')
+
+
+def get_sqlite_schema_version(path: str) -> int:
+ """Return the schema version from a SQLite based coverage file."""
+ try:
+ with sqlite3.connect(path) as connection:
+ cursor = connection.cursor()
+ cursor.execute('select version from coverage_schema')
+ schema_version = cursor.fetchmany(1)[0][0]
+ except Exception as ex:
+ raise CoverageError(path, f'SQLite error: {ex}') from ex
+
+ if not isinstance(schema_version, int):
+ raise CoverageError(path, f'Schema version is {type(schema_version)} instead of {int}: {schema_version}')
+
+ if schema_version < 1:
+ raise CoverageError(path, f'Schema version is out-of-range: {schema_version}')
+
+ return schema_version
+
+
+def cover_python(
+ args: TestConfig,
+ python: PythonConfig,
+ cmd: list[str],
+ target_name: str,
+ env: dict[str, str],
+ capture: bool,
+ data: t.Optional[str] = None,
+ cwd: t.Optional[str] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run a command while collecting Python code coverage."""
+ if args.coverage:
+ env.update(get_coverage_environment(args, target_name, python.version))
+
+ return intercept_python(args, python, cmd, env, capture, data, cwd)
+
+
+def get_coverage_platform(config: HostConfig) -> str:
+ """Return the platform label for the given host config."""
+ if isinstance(config, PosixRemoteConfig):
+ platform = f'remote-{sanitize_host_name(config.name)}'
+ elif isinstance(config, DockerConfig):
+ platform = f'docker-{sanitize_host_name(config.name)}'
+ elif isinstance(config, PosixSshConfig):
+ platform = f'ssh-{sanitize_host_name(config.host)}'
+ elif isinstance(config, OriginConfig):
+ platform = 'origin' # previous versions of ansible-test used "local-{python_version}"
+ else:
+ raise NotImplementedError(f'Coverage platform label not defined for type: {type(config)}')
+
+ return platform
+
+
+def get_coverage_environment(
+ args: TestConfig,
+ target_name: str,
+ version: str,
+) -> dict[str, str]:
+ """Return environment variables needed to collect code coverage."""
+ # unit tests, sanity tests and other special cases (localhost only)
+ # config is in a temporary directory
+ # results are in the source tree
+ config_file = get_coverage_config(args)
+ coverage_name = '='.join((args.command, target_name, get_coverage_platform(args.controller), f'python-{version}', 'coverage'))
+ coverage_dir = os.path.join(data_context().content.root, data_context().content.results_path, ResultType.COVERAGE.name)
+ coverage_file = os.path.join(coverage_dir, coverage_name)
+
+ make_dirs(coverage_dir)
+
+ if args.coverage_check:
+ # cause the 'coverage' module to be found, but not imported or enabled
+ coverage_file = ''
+
+ # Enable code coverage collection on local Python programs (this does not include Ansible modules).
+ # Used by the injectors to support code coverage.
+ # Used by the pytest unit test plugin to support code coverage.
+ # The COVERAGE_FILE variable is also used directly by the 'coverage' module.
+ env = dict(
+ COVERAGE_CONF=config_file,
+ COVERAGE_FILE=coverage_file,
+ )
+
+ return env
+
+
+@mutex
+def get_coverage_config(args: TestConfig) -> str:
+ """Return the path to the coverage config, creating the config if it does not already exist."""
+ try:
+ return get_coverage_config.path # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+
+ coverage_config = generate_coverage_config(args)
+
+ if args.explain:
+ temp_dir = '/tmp/coverage-temp-dir'
+ else:
+ temp_dir = tempfile.mkdtemp()
+ atexit.register(lambda: remove_tree(temp_dir))
+
+ path = os.path.join(temp_dir, COVERAGE_CONFIG_NAME)
+
+ if not args.explain:
+ write_text_file(path, coverage_config)
+
+ get_coverage_config.path = path # type: ignore[attr-defined]
+
+ return path
+
+
+def generate_coverage_config(args: TestConfig) -> str:
+ """Generate code coverage configuration for tests."""
+ if data_context().content.collection:
+ coverage_config = generate_collection_coverage_config(args)
+ else:
+ coverage_config = generate_ansible_coverage_config()
+
+ return coverage_config
+
+
+def generate_ansible_coverage_config() -> str:
+ """Generate code coverage configuration for Ansible tests."""
+ coverage_config = '''
+[run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+
+omit =
+ */python*/dist-packages/*
+ */python*/site-packages/*
+ */python*/distutils/*
+ */pyshared/*
+ */pytest
+ */AnsiballZ_*.py
+ */test/results/*
+'''
+
+ return coverage_config
+
+
+def generate_collection_coverage_config(args: TestConfig) -> str:
+ """Generate code coverage configuration for Ansible Collection tests."""
+ coverage_config = '''
+[run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+disable_warnings =
+ no-data-collected
+'''
+
+ if isinstance(args, IntegrationConfig):
+ coverage_config += '''
+include =
+ %s/*
+ */%s/*
+''' % (data_context().content.root, data_context().content.collection.directory)
+ elif isinstance(args, SanityConfig):
+ # temporary work-around for import sanity test
+ coverage_config += '''
+include =
+ %s/*
+
+omit =
+ %s/*
+''' % (data_context().content.root, os.path.join(data_context().content.root, data_context().content.results_path))
+ else:
+ coverage_config += '''
+include =
+ %s/*
+''' % data_context().content.root
+
+ return coverage_config
+
+
+def self_check() -> None:
+ """Check for internal errors due to incorrect code changes."""
+ # Verify all supported Python versions have a coverage version.
+ for version in SUPPORTED_PYTHON_VERSIONS:
+ get_coverage_version(version)
+
+ # Verify all controller Python versions are mapped to the latest coverage version.
+ for version in CONTROLLER_PYTHON_VERSIONS:
+ if get_coverage_version(version) != CONTROLLER_COVERAGE_VERSION:
+ raise InternalError(f'Controller Python version {version} is not mapped to the latest coverage version.')
+
+
+self_check()
diff --git a/test/lib/ansible_test/_internal/data.py b/test/lib/ansible_test/_internal/data.py
new file mode 100644
index 0000000..635b0c3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/data.py
@@ -0,0 +1,286 @@
+"""Context information for the current invocation of ansible-test."""
+from __future__ import annotations
+
+import collections.abc as c
+import dataclasses
+import os
+import typing as t
+
+from .util import (
+ ApplicationError,
+ import_plugins,
+ is_subdir,
+ is_valid_identifier,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_ROOT,
+ ANSIBLE_SOURCE_ROOT,
+ display,
+ cache,
+)
+
+from .provider import (
+ find_path_provider,
+ get_path_provider_classes,
+ ProviderNotFoundForPath,
+)
+
+from .provider.source import (
+ SourceProvider,
+)
+
+from .provider.source.unversioned import (
+ UnversionedSource,
+)
+
+from .provider.source.installed import (
+ InstalledSource,
+)
+
+from .provider.source.unsupported import (
+ UnsupportedSource,
+)
+
+from .provider.layout import (
+ ContentLayout,
+ LayoutProvider,
+)
+
+from .provider.layout.unsupported import (
+ UnsupportedLayout,
+)
+
+
+class DataContext:
+ """Data context providing details about the current execution environment for ansible-test."""
+ def __init__(self) -> None:
+ content_path = os.environ.get('ANSIBLE_TEST_CONTENT_ROOT')
+ current_path = os.getcwd()
+
+ layout_providers = get_path_provider_classes(LayoutProvider)
+ source_providers = get_path_provider_classes(SourceProvider)
+
+ self.__layout_providers = layout_providers
+ self.__source_providers = source_providers
+ self.__ansible_source: t.Optional[tuple[tuple[str, str], ...]] = None
+
+ self.payload_callbacks: list[c.Callable[[list[tuple[str, str]]], None]] = []
+
+ if content_path:
+ content = self.__create_content_layout(layout_providers, source_providers, content_path, False)
+ elif ANSIBLE_SOURCE_ROOT and is_subdir(current_path, ANSIBLE_SOURCE_ROOT):
+ content = self.__create_content_layout(layout_providers, source_providers, ANSIBLE_SOURCE_ROOT, False)
+ else:
+ content = self.__create_content_layout(layout_providers, source_providers, current_path, True)
+
+ self.content: ContentLayout = content
+
+ def create_collection_layouts(self) -> list[ContentLayout]:
+ """
+ Return a list of collection layouts, one for each collection in the same collection root as the current collection layout.
+ An empty list is returned if the current content layout is not a collection layout.
+ """
+ layout = self.content
+ collection = layout.collection
+
+ if not collection:
+ return []
+
+ root_path = os.path.join(collection.root, 'ansible_collections')
+ display.info('Scanning collection root: %s' % root_path, verbosity=1)
+ namespace_names = sorted(name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name)))
+ collections = []
+
+ for namespace_name in namespace_names:
+ namespace_path = os.path.join(root_path, namespace_name)
+ collection_names = sorted(name for name in os.listdir(namespace_path) if os.path.isdir(os.path.join(namespace_path, name)))
+
+ for collection_name in collection_names:
+ collection_path = os.path.join(namespace_path, collection_name)
+
+ if collection_path == os.path.join(collection.root, collection.directory):
+ collection_layout = layout
+ else:
+ collection_layout = self.__create_content_layout(self.__layout_providers, self.__source_providers, collection_path, False)
+
+ file_count = len(collection_layout.all_files())
+
+ if not file_count:
+ continue
+
+ display.info('Including collection: %s (%d files)' % (collection_layout.collection.full_name, file_count), verbosity=1)
+ collections.append(collection_layout)
+
+ return collections
+
+ @staticmethod
+ def __create_content_layout(layout_providers: list[t.Type[LayoutProvider]],
+ source_providers: list[t.Type[SourceProvider]],
+ root: str,
+ walk: bool,
+ ) -> ContentLayout:
+ """Create a content layout using the given providers and root path."""
+ try:
+ layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk)
+ except ProviderNotFoundForPath:
+ layout_provider = UnsupportedLayout(root)
+
+ try:
+ # Begin the search for the source provider at the layout provider root.
+ # This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error.
+ # Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project.
+ # It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control.
+ if isinstance(layout_provider, UnsupportedLayout):
+ source_provider: SourceProvider = UnsupportedSource(layout_provider.root)
+ else:
+ source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk)
+ except ProviderNotFoundForPath:
+ source_provider = UnversionedSource(layout_provider.root)
+
+ layout = layout_provider.create(layout_provider.root, source_provider.get_paths(layout_provider.root))
+
+ return layout
+
+ def __create_ansible_source(self):
+ """Return a tuple of Ansible source files with both absolute and relative paths."""
+ if not ANSIBLE_SOURCE_ROOT:
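+            # Not running from source; ansible-test is installed, so collect files from the installed library locations.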
+ sources = []
+
+ source_provider = InstalledSource(ANSIBLE_LIB_ROOT)
+ sources.extend((os.path.join(source_provider.root, path), os.path.join('lib', 'ansible', path))
+ for path in source_provider.get_paths(source_provider.root))
+
+ source_provider = InstalledSource(ANSIBLE_TEST_ROOT)
+ sources.extend((os.path.join(source_provider.root, path), os.path.join('test', 'lib', 'ansible_test', path))
+ for path in source_provider.get_paths(source_provider.root))
+
+ return tuple(sources)
+
+ if self.content.is_ansible:
+ return tuple((os.path.join(self.content.root, path), path) for path in self.content.all_files())
+
+ try:
+ source_provider = find_path_provider(SourceProvider, self.__source_providers, ANSIBLE_SOURCE_ROOT, False)
+ except ProviderNotFoundForPath:
+ source_provider = UnversionedSource(ANSIBLE_SOURCE_ROOT)
+
+ return tuple((os.path.join(source_provider.root, path), path) for path in source_provider.get_paths(source_provider.root))
+
+ @property
+ def ansible_source(self) -> tuple[tuple[str, str], ...]:
+ """Return a tuple of Ansible source files with both absolute and relative paths."""
+ if not self.__ansible_source:
+ self.__ansible_source = self.__create_ansible_source()
+
+ return self.__ansible_source
+
+ def register_payload_callback(self, callback: c.Callable[[list[tuple[str, str]]], None]) -> None:
+ """Register the given payload callback."""
+ self.payload_callbacks.append(callback)
+
+ def check_layout(self) -> None:
+ """Report an error if the layout is unsupported."""
+ if self.content.unsupported:
+ raise ApplicationError(self.explain_working_directory())
+
+ def explain_working_directory(self) -> str:
+ """Return a message explaining the working directory requirements."""
+ blocks = [
+ 'The current working directory must be within the source tree being tested.',
+ '',
+ ]
+
+ if ANSIBLE_SOURCE_ROOT:
+ blocks.append(f'Testing Ansible: {ANSIBLE_SOURCE_ROOT}/')
+ blocks.append('')
+
+ cwd = os.getcwd()
+
+ blocks.append('Testing an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/')
+ blocks.append('Example #1: community.general -> ~/code/ansible_collections/community/general/')
+ blocks.append('Example #2: ansible.util -> ~/.ansible/collections/ansible_collections/ansible/util/')
+ blocks.append('')
+ blocks.append(f'Current working directory: {cwd}/')
+
+ if os.path.basename(os.path.dirname(cwd)) == 'ansible_collections':
+ blocks.append(f'Expected parent directory: {os.path.dirname(cwd)}/{{namespace}}/{{collection}}/')
+ elif os.path.basename(cwd) == 'ansible_collections':
+ blocks.append(f'Expected parent directory: {cwd}/{{namespace}}/{{collection}}/')
+ elif 'ansible_collections' not in cwd.split(os.path.sep):
+ blocks.append('No "ansible_collections" parent directory was found.')
+
+ if self.content.collection:
+ if not is_valid_identifier(self.content.collection.namespace):
+ blocks.append(f'The namespace "{self.content.collection.namespace}" is an invalid identifier or a reserved keyword.')
+
+ if not is_valid_identifier(self.content.collection.name):
+ blocks.append(f'The name "{self.content.collection.name}" is an invalid identifier or a reserved keyword.')
+
+ message = '\n'.join(blocks)
+
+ return message
+
+
+@cache
+def data_context() -> DataContext:
+ """Initialize provider plugins."""
+ provider_types = (
+ 'layout',
+ 'source',
+ )
+
+ for provider_type in provider_types:
+ import_plugins('provider/%s' % provider_type)
+
+ context = DataContext()
+
+ return context
+
+
+@dataclasses.dataclass(frozen=True)
+class PluginInfo:
+ """Information about an Ansible plugin."""
+ plugin_type: str
+ name: str
+ paths: list[str]
+
+
+@cache
+def content_plugins() -> dict[str, dict[str, PluginInfo]]:
+ """
+ Analyze content.
+ The primary purpose of this analysis is to facilitate mapping of integration tests to the plugin(s) they are intended to test.
+ """
+ plugins: dict[str, dict[str, PluginInfo]] = {}
+
+ for plugin_type, plugin_directory in data_context().content.plugin_paths.items():
+ plugin_paths = sorted(data_context().content.walk_files(plugin_directory))
+ plugin_directory_offset = len(plugin_directory.split(os.path.sep))
+
+ plugin_files: dict[str, list[str]] = {}
+
+ for plugin_path in plugin_paths:
+ plugin_filename = os.path.basename(plugin_path)
+ plugin_parts = plugin_path.split(os.path.sep)[plugin_directory_offset:-1]
+
+ if plugin_filename == '__init__.py':
+ if plugin_type != 'module_utils':
+ continue
+ else:
+ plugin_name = os.path.splitext(plugin_filename)[0]
+
+ if data_context().content.is_ansible and plugin_type == 'modules':
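+                    # ansible-core marks deprecated modules with a leading underscore in the filename; strip it to get the real plugin name.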
+ plugin_name = plugin_name.lstrip('_')
+
+ plugin_parts.append(plugin_name)
+
+ plugin_name = '.'.join(plugin_parts)
+
+            plugin_files.setdefault(plugin_name, []).append(plugin_path)
+
+ plugins[plugin_type] = {plugin_name: PluginInfo(
+ plugin_type=plugin_type,
+ name=plugin_name,
+ paths=paths,
+ ) for plugin_name, paths in plugin_files.items()}
+
+ return plugins
diff --git a/test/lib/ansible_test/_internal/delegation.py b/test/lib/ansible_test/_internal/delegation.py
new file mode 100644
index 0000000..0f181a2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/delegation.py
@@ -0,0 +1,375 @@
+"""Delegate test execution to another environment."""
+from __future__ import annotations
+
+import collections.abc as c
+import contextlib
+import json
+import os
+import tempfile
+import typing as t
+
+from .constants import (
+ STATUS_HOST_CONNECTION_ERROR,
+)
+
+from .locale_util import (
+ STANDARD_LOCALE,
+)
+
+from .io import (
+ make_dirs,
+)
+
+from .config import (
+ CommonConfig,
+ EnvironmentConfig,
+ IntegrationConfig,
+ ShellConfig,
+ TestConfig,
+ UnitsConfig,
+)
+
+from .util import (
+ SubprocessError,
+ display,
+ filter_args,
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_ROOT,
+ OutputStream,
+)
+
+from .util_common import (
+ ResultType,
+ process_scoped_temporary_directory,
+)
+
+from .containers import (
+ support_container_context,
+ ContainerDatabase,
+)
+
+from .data import (
+ data_context,
+)
+
+from .payload import (
+ create_payload,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+from .host_configs import (
+ OriginConfig,
+ PythonConfig,
+)
+
+from .connections import (
+ Connection,
+ DockerConnection,
+ SshConnection,
+ LocalConnection,
+)
+
+from .provisioning import (
+ HostState,
+)
+
+from .content_config import (
+ serialize_content_config,
+)
+
+
+@contextlib.contextmanager
+def delegation_context(args: EnvironmentConfig, host_state: HostState) -> c.Iterator[None]:
+ """Context manager for serialized host state during delegation."""
+ make_dirs(ResultType.TMP.path)
+
+ # noinspection PyUnusedLocal
+ python = host_state.controller_profile.python # make sure the python interpreter has been initialized before serializing host state
+ del python
+
+ with tempfile.TemporaryDirectory(prefix='host-', dir=ResultType.TMP.path) as host_dir:
+ args.host_settings.serialize(os.path.join(host_dir, 'settings.dat'))
+ host_state.serialize(os.path.join(host_dir, 'state.dat'))
+ serialize_content_config(args, os.path.join(host_dir, 'config.dat'))
+
+ args.host_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(host_dir))
+
+ try:
+ yield
+ finally:
+ args.host_path = None
+
+
+def delegate(args: CommonConfig, host_state: HostState, exclude: list[str], require: list[str]) -> None:
+ """Delegate execution of ansible-test to another environment."""
+ assert isinstance(args, EnvironmentConfig)
+
+ with delegation_context(args, host_state):
+ if isinstance(args, TestConfig):
+ args.metadata.ci_provider = get_ci_provider().code
+
+ make_dirs(ResultType.TMP.path)
+
+ with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:
+ args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))
+ args.metadata.to_file(args.metadata_path)
+
+ try:
+ delegate_command(args, host_state, exclude, require)
+ finally:
+ args.metadata_path = None
+ else:
+ delegate_command(args, host_state, exclude, require)
+
+
+def delegate_command(args: EnvironmentConfig, host_state: HostState, exclude: list[str], require: list[str]) -> None:
+ """Delegate execution based on the provided host state."""
+ con = host_state.controller_profile.get_origin_controller_connection()
+ working_directory = host_state.controller_profile.get_working_directory()
+ host_delegation = not isinstance(args.controller, OriginConfig)
+
+ if host_delegation:
+ if data_context().content.collection:
+ content_root = os.path.join(working_directory, data_context().content.collection.directory)
+ else:
+ content_root = os.path.join(working_directory, 'ansible')
+
+ ansible_bin_path = os.path.join(working_directory, 'ansible', 'bin')
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as payload_file:
+ create_payload(args, payload_file.name)
+ con.extract_archive(chdir=working_directory, src=payload_file)
+ else:
+ content_root = working_directory
+ ansible_bin_path = ANSIBLE_BIN_PATH
+
+ command = generate_command(args, host_state.controller_profile.python, ansible_bin_path, content_root, exclude, require)
+
+ if isinstance(con, SshConnection):
+ ssh = con.settings
+ else:
+ ssh = None
+
+ options = []
+
+ if isinstance(args, IntegrationConfig) and args.controller.is_managed and all(target.is_managed for target in args.targets):
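+        # The controller and all targets are ansible-test managed hosts, which are disposable, so destructive tests are enabled for the delegated run.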
+ if not args.allow_destructive:
+ options.append('--allow-destructive')
+
+ with support_container_context(args, ssh) as containers: # type: t.Optional[ContainerDatabase]
+ if containers:
+ options.extend(['--containers', json.dumps(containers.to_dict())])
+
+ # Run unit tests unprivileged to prevent stray writes to the source tree.
+ # Also disconnect from the network once requirements have been installed.
+ if isinstance(args, UnitsConfig) and isinstance(con, DockerConnection):
+ pytest_user = 'pytest'
+
+ writable_dirs = [
+ os.path.join(content_root, ResultType.JUNIT.relative_path),
+ os.path.join(content_root, ResultType.COVERAGE.relative_path),
+ ]
+
+ con.run(['mkdir', '-p'] + writable_dirs, capture=True)
+ con.run(['chmod', '777'] + writable_dirs, capture=True)
+ con.run(['chmod', '755', working_directory], capture=True)
+ con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)], capture=True)
+ con.run(['useradd', pytest_user, '--create-home'], capture=True)
+
+ con.run(insert_options(command, options + ['--requirements-mode', 'only']), capture=False)
+
+ container = con.inspect()
+ networks = container.get_network_names()
+
+ if networks is not None:
+ for network in networks:
+ try:
+ con.disconnect_network(network)
+ except SubprocessError:
+ display.warning(
+ 'Unable to disconnect network "%s" (this is normal under podman). '
+ 'Tests will not be isolated from the network. Network-related tests may '
+ 'misbehave.' % (network,)
+ )
+ else:
+ display.warning('Network disconnection is not supported (this is normal under podman). '
+ 'Tests will not be isolated from the network. Network-related tests may misbehave.')
+
+ options.extend(['--requirements-mode', 'skip'])
+
+ con.user = pytest_user
+
+ success = False
+ status = 0
+
+ try:
+ # When delegating, preserve the original separate stdout/stderr streams, but only when the following conditions are met:
+ # 1) Display output is being sent to stderr. This indicates the output on stdout must be kept separate from stderr.
+ # 2) The delegation is non-interactive. Interactive mode, which generally uses a TTY, is not compatible with intercepting stdout/stderr.
+ # The downside to having separate streams is that individual lines of output from each are more likely to appear out-of-order.
+ output_stream = OutputStream.ORIGINAL if args.display_stderr and not args.interactive else None
+ con.run(insert_options(command, options), capture=False, interactive=args.interactive, output_stream=output_stream)
+ success = True
+ except SubprocessError as ex:
+ status = ex.status
+ raise
+ finally:
+ if host_delegation:
+ download_results(args, con, content_root, success)
+
+ if not success and status == STATUS_HOST_CONNECTION_ERROR:
+ for target in host_state.target_profiles:
+ target.on_target_failure() # when the controller is delegated, report failures after delegation fails
+
+
+def insert_options(command: list[str], options: list[str]) -> list[str]:
+ """Insert addition command line options into the given command and return the result."""
+ result = []
+
+ for arg in command:
+ if options and arg.startswith('--'):
+ result.extend(options)
+ options = None
+
+ result.append(arg)
+
+ return result
+
+
+def download_results(args: EnvironmentConfig, con: Connection, content_root: str, success: bool) -> None:
+ """Download results from a delegated controller."""
+ remote_results_root = os.path.join(content_root, data_context().content.results_path)
+ local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
+
+ remote_test_root = os.path.dirname(remote_results_root)
+ remote_results_name = os.path.basename(remote_results_root)
+
+ make_dirs(local_test_root) # make sure directory exists for collections which have no tests
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-test-result-', suffix='.tgz') as result_file:
+ try:
+ con.create_archive(chdir=remote_test_root, name=remote_results_name, dst=result_file, exclude=ResultType.TMP.name)
+ except SubprocessError as ex:
+ if success:
+ raise # download errors are fatal if tests succeeded
+
+ # surface download failures as a warning here to avoid masking test failures
+ display.warning(f'Failed to download results while handling an exception: {ex}')
+ else:
+ result_file.seek(0)
+
+ local_con = LocalConnection(args)
+ local_con.extract_archive(chdir=local_test_root, src=result_file)
+
+
+def generate_command(
+ args: EnvironmentConfig,
+ python: PythonConfig,
+ ansible_bin_path: str,
+ content_root: str,
+ exclude: list[str],
+ require: list[str],
+) -> list[str]:
+ """Generate the command necessary to delegate ansible-test."""
+ cmd = [os.path.join(ansible_bin_path, 'ansible-test')]
+ cmd = [python.path] + cmd
+
+ env_vars = dict(
+ ANSIBLE_TEST_CONTENT_ROOT=content_root,
+ )
+
+ if isinstance(args.controller, OriginConfig):
+ # Expose the ansible and ansible_test library directories to the Python environment.
+ # This is only required when delegation is used on the origin host.
+ library_path = process_scoped_temporary_directory(args)
+
+ os.symlink(ANSIBLE_LIB_ROOT, os.path.join(library_path, 'ansible'))
+ os.symlink(ANSIBLE_TEST_ROOT, os.path.join(library_path, 'ansible_test'))
+
+ env_vars.update(
+ PYTHONPATH=library_path,
+ )
+ else:
+ # When delegating to a host other than the origin, the locale must be explicitly set.
+ # Setting of the locale for the origin host is handled by common_environment().
+ # Not all connections support setting the locale, and for those that do, it isn't guaranteed to work.
+ # This is needed to make sure the delegated environment is configured for UTF-8 before running Python.
+ env_vars.update(
+ LC_ALL=STANDARD_LOCALE,
+ )
+
+ # Propagate the TERM environment variable to the remote host when using the shell command.
+ if isinstance(args, ShellConfig):
+ term = os.environ.get('TERM')
+
+ if term is not None:
+ env_vars.update(TERM=term)
+
+ env_args = ['%s=%s' % (key, env_vars[key]) for key in sorted(env_vars)]
+
+ cmd = ['/usr/bin/env'] + env_args + cmd
+
+ cmd += list(filter_options(args, args.host_settings.filtered_args, exclude, require))
+
+ return cmd
+
+
+def filter_options(
+ args: EnvironmentConfig,
+ argv: list[str],
+ exclude: list[str],
+ require: list[str],
+) -> c.Iterable[str]:
+ """Return an iterable that filters out unwanted CLI options and injects new ones as requested."""
+ replace: list[tuple[str, int, t.Optional[t.Union[bool, str, list[str]]]]] = [
+ ('--docker-no-pull', 0, False),
+ ('--truncate', 1, str(args.truncate)),
+ ('--color', 1, 'yes' if args.color else 'no'),
+ ('--redact', 0, False),
+ ('--no-redact', 0, not args.redact),
+ ('--host-path', 1, args.host_path),
+ ]
+
+ if isinstance(args, TestConfig):
+ replace.extend([
+ ('--changed', 0, False),
+ ('--tracked', 0, False),
+ ('--untracked', 0, False),
+ ('--ignore-committed', 0, False),
+ ('--ignore-staged', 0, False),
+ ('--ignore-unstaged', 0, False),
+ ('--changed-from', 1, False),
+ ('--changed-path', 1, False),
+ ('--metadata', 1, args.metadata_path),
+ ('--exclude', 1, exclude),
+ ('--require', 1, require),
+ ('--base-branch', 1, args.base_branch or get_ci_provider().get_base_branch()),
+ ])
+
+ pass_through_args: list[str] = []
+
+ for arg in filter_args(argv, {option: count for option, count, replacement in replace}):
+ if arg == '--' or pass_through_args:
+ pass_through_args.append(arg)
+ continue
+
+ yield arg
+
+ for option, _count, replacement in replace:
+ if not replacement:
+ continue
+
+ if isinstance(replacement, bool):
+ yield option
+ elif isinstance(replacement, str):
+ yield from [option, replacement]
+ elif isinstance(replacement, list):
+ for item in replacement:
+ yield from [option, item]
+
+ yield from args.delegate_args
+ yield from pass_through_args
diff --git a/test/lib/ansible_test/_internal/dev/__init__.py b/test/lib/ansible_test/_internal/dev/__init__.py
new file mode 100644
index 0000000..e7c9b7d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/dev/__init__.py
@@ -0,0 +1,2 @@
+"""Development and testing support code. Enabled through the use of `--dev-*` command line options."""
+from __future__ import annotations
diff --git a/test/lib/ansible_test/_internal/dev/container_probe.py b/test/lib/ansible_test/_internal/dev/container_probe.py
new file mode 100644
index 0000000..be22e01
--- /dev/null
+++ b/test/lib/ansible_test/_internal/dev/container_probe.py
@@ -0,0 +1,210 @@
+"""Diagnostic utilities to probe container cgroup behavior during development and testing (both manual and integration)."""
+from __future__ import annotations
+
+import dataclasses
+import enum
+import json
+import os
+import pathlib
+import pwd
+import typing as t
+
+from ..io import (
+ read_text_file,
+ write_text_file,
+)
+
+from ..util import (
+ display,
+ ANSIBLE_TEST_TARGET_ROOT,
+)
+
+from ..config import (
+ EnvironmentConfig,
+)
+
+from ..docker_util import (
+ LOGINUID_NOT_SET,
+ docker_exec,
+ get_docker_info,
+ get_podman_remote,
+ require_docker,
+)
+
+from ..host_configs import (
+ DockerConfig,
+)
+
+from ..cgroup import (
+ CGroupEntry,
+ CGroupPath,
+ MountEntry,
+ MountType,
+)
+
+
+class CGroupState(enum.Enum):
+ """The expected state of a cgroup related mount point."""
+ HOST = enum.auto()
+ PRIVATE = enum.auto()
+ SHADOWED = enum.auto()
+
+
+@dataclasses.dataclass(frozen=True)
+class CGroupMount:
+ """Details on a cgroup mount point that is expected to be present in the container."""
+ path: str
+ type: t.Optional[str]
+ writable: t.Optional[bool]
+ state: t.Optional[CGroupState]
+
+ def __post_init__(self):
+ assert pathlib.PurePosixPath(self.path).is_relative_to(CGroupPath.ROOT)
+
+ if self.type is None:
+ assert self.state is None
+ elif self.type == MountType.TMPFS:
+ assert self.writable is True
+ assert self.state is None
+ else:
+ assert self.type in (MountType.CGROUP_V1, MountType.CGROUP_V2)
+ assert self.state is not None
+
+
+def check_container_cgroup_status(args: EnvironmentConfig, config: DockerConfig, container_name: str, expected_mounts: tuple[CGroupMount, ...]) -> None:
+ """Check the running container to examine the state of the cgroup hierarchies."""
+ cmd = ['sh', '-c', 'cat /proc/1/cgroup && echo && cat /proc/1/mountinfo']
+
+ stdout = docker_exec(args, container_name, cmd, capture=True)[0]
+ cgroups_stdout, mounts_stdout = stdout.split('\n\n')
+
+ cgroups = CGroupEntry.loads(cgroups_stdout)
+ mounts = MountEntry.loads(mounts_stdout)
+
+ mounts = tuple(mount for mount in mounts if mount.path.is_relative_to(CGroupPath.ROOT))
+
+ mount_cgroups: dict[MountEntry, CGroupEntry] = {}
+ probe_paths: dict[pathlib.PurePosixPath, t.Optional[str]] = {}
+
+ for cgroup in cgroups:
+ if cgroup.subsystem:
+ mount = ([mount for mount in mounts if
+ mount.type == MountType.CGROUP_V1 and
+ mount.path.is_relative_to(cgroup.root_path) and
+ cgroup.full_path.is_relative_to(mount.path)
+ ] or [None])[-1]
+ else:
+ mount = ([mount for mount in mounts if
+ mount.type == MountType.CGROUP_V2 and
+ mount.path == cgroup.root_path
+ ] or [None])[-1]
+
+ if mount:
+ mount_cgroups[mount] = cgroup
+
+ for mount in mounts:
+ probe_paths[mount.path] = None
+
+ if (cgroup := mount_cgroups.get(mount)) and cgroup.full_path != mount.path: # child of mount.path
+ probe_paths[cgroup.full_path] = None
+
+ probe_script = read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'probe_cgroups.py'))
+ probe_command = [config.python.path, '-', f'{container_name}-probe'] + [str(path) for path in probe_paths]
+ probe_results = json.loads(docker_exec(args, container_name, probe_command, capture=True, data=probe_script)[0])
+
+ for path in probe_paths:
+ probe_paths[path] = probe_results[str(path)]
+
+ remaining_mounts: dict[pathlib.PurePosixPath, MountEntry] = {mount.path: mount for mount in mounts}
+ results: dict[pathlib.PurePosixPath, tuple[bool, str]] = {}
+
+ for expected_mount in expected_mounts:
+ expected_path = pathlib.PurePosixPath(expected_mount.path)
+
+ if not (actual_mount := remaining_mounts.pop(expected_path, None)):
+ results[expected_path] = (False, 'not mounted')
+ continue
+
+ actual_mount_write_error = probe_paths[actual_mount.path]
+ actual_mount_errors = []
+
+ if cgroup := mount_cgroups.get(actual_mount):
+ if expected_mount.state == CGroupState.SHADOWED:
+ actual_mount_errors.append('unexpected cgroup association')
+
+ if cgroup.root_path == cgroup.full_path and expected_mount.state == CGroupState.HOST:
+ results[cgroup.root_path.joinpath('???')] = (False, 'missing cgroup')
+
+ if cgroup.full_path == actual_mount.path:
+ if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE:
+ actual_mount_errors.append('unexpected mount')
+ else:
+ cgroup_write_error = probe_paths[cgroup.full_path]
+ cgroup_errors = []
+
+ if expected_mount.state == CGroupState.SHADOWED:
+ cgroup_errors.append('unexpected cgroup association')
+
+ if cgroup.root_path != cgroup.full_path and expected_mount.state == CGroupState.PRIVATE:
+ cgroup_errors.append('unexpected cgroup')
+
+ if cgroup_write_error:
+ cgroup_errors.append(cgroup_write_error)
+
+ if cgroup_errors:
+ results[cgroup.full_path] = (False, f'directory errors: {", ".join(cgroup_errors)}')
+ else:
+ results[cgroup.full_path] = (True, 'directory (writable)')
+ elif expected_mount.state not in (None, CGroupState.SHADOWED):
+ actual_mount_errors.append('missing cgroup association')
+
+ if actual_mount.type != expected_mount.type and expected_mount.type is not None:
+ actual_mount_errors.append(f'type not {expected_mount.type}')
+
+ if bool(actual_mount_write_error) == expected_mount.writable:
+ actual_mount_errors.append(f'{actual_mount_write_error or "writable"}')
+
+ if actual_mount_errors:
+ results[actual_mount.path] = (False, f'{actual_mount.type} errors: {", ".join(actual_mount_errors)}')
+ else:
+ results[actual_mount.path] = (True, f'{actual_mount.type} ({actual_mount_write_error or "writable"})')
+
+ for remaining_mount in remaining_mounts.values():
+ remaining_mount_write_error = probe_paths[remaining_mount.path]
+
+ results[remaining_mount.path] = (False, f'unexpected {remaining_mount.type} mount ({remaining_mount_write_error or "writable"})')
+
+ identity = get_identity(args, config, container_name)
+ messages: list[tuple[pathlib.PurePosixPath, bool, str]] = [(path, result[0], result[1]) for path, result in sorted(results.items())]
+ message = '\n'.join(f'{"PASS" if result else "FAIL"}: {path} -> {message}' for path, result, message in messages)
+
+ display.info(f'>>> Container: {identity}\n{message.rstrip()}')
+
+ if args.dev_probe_cgroups:
+ write_text_file(os.path.join(args.dev_probe_cgroups, f'{identity}.log'), message)
+
+
+def get_identity(args: EnvironmentConfig, config: DockerConfig, container_name: str) -> str:
+ """Generate and return an identity string to use when logging test results."""
+ engine = require_docker().command
+
+ try:
+ loginuid = int(read_text_file('/proc/self/loginuid'))
+ except FileNotFoundError:
+ loginuid = LOGINUID_NOT_SET
+
+ user = pwd.getpwuid(os.getuid()).pw_name
+ login_user = user if loginuid == LOGINUID_NOT_SET else pwd.getpwuid(loginuid).pw_name
+ remote = engine == 'podman' and get_podman_remote()
+
+ tags = (
+ config.name,
+ engine,
+ f'cgroup={config.cgroup.value}@{get_docker_info(args).cgroup_version}',
+ f'remote={remote}',
+ f'user={user}',
+ f'loginuid={login_user}',
+ container_name,
+ )
+
+ return '|'.join(tags)
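For context, an expectation table for this probe might look as follows. The values are hypothetical and assume CGroupPath.ROOT is '/sys/fs/cgroup'; the real tables are built by the container host profiles, outside this file:

    # Hypothetical expected mounts for a container in cgroup v2 unified mode,
    # as could be passed to check_container_cgroup_status() above.
    expected_mounts = (
        CGroupMount(path='/sys/fs/cgroup', type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),
    )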
diff --git a/test/lib/ansible_test/_internal/diff.py b/test/lib/ansible_test/_internal/diff.py
new file mode 100644
index 0000000..edaf6c5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/diff.py
@@ -0,0 +1,226 @@
+"""Diff parsing functions and classes."""
+from __future__ import annotations
+
+import re
+import textwrap
+import traceback
+import typing as t
+
+from .util import (
+ ApplicationError,
+)
+
+
+def parse_diff(lines: list[str]) -> list[FileDiff]:
+ """Parse the given diff lines and return a list of FileDiff objects representing the changes of each file."""
+ return DiffParser(lines).files
+
+
+class FileDiff:
+ """Parsed diff for a single file."""
+ def __init__(self, old_path: str, new_path: str) -> None:
+ self.old = DiffSide(old_path, new=False)
+ self.new = DiffSide(new_path, new=True)
+ self.headers: list[str] = []
+ self.binary = False
+
+ def append_header(self, line: str) -> None:
+ """Append the given line to the list of headers for this file."""
+ self.headers.append(line)
+
+ @property
+ def is_complete(self) -> bool:
+ """True if the diff is complete, otherwise False."""
+ return self.old.is_complete and self.new.is_complete
+
+
+class DiffSide:
+ """Parsed diff for a single 'side' of a single file."""
+ def __init__(self, path: str, new: bool) -> None:
+ self.path = path
+ self.new = new
+ self.prefix = '+' if self.new else '-'
+ self.eof_newline = True
+ self.exists = True
+
+ self.lines: list[tuple[int, str]] = []
+ self.lines_and_context: list[tuple[int, str]] = []
+ self.ranges: list[tuple[int, int]] = []
+
+ self._next_line_number = 0
+ self._lines_remaining = 0
+ self._range_start = 0
+
+ def set_start(self, line_start: int, line_count: int) -> None:
+ """Set the starting line and line count."""
+ self._next_line_number = line_start
+ self._lines_remaining = line_count
+ self._range_start = 0
+
+ def append(self, line: str) -> None:
+ """Append the given line."""
+ if self._lines_remaining <= 0:
+ raise Exception('Diff range overflow.')
+
+ entry = self._next_line_number, line
+
+ if line.startswith(' '):
+ pass
+ elif line.startswith(self.prefix):
+ self.lines.append(entry)
+
+ if not self._range_start:
+ self._range_start = self._next_line_number
+ else:
+ raise Exception('Unexpected diff content prefix.')
+
+ self.lines_and_context.append(entry)
+
+ self._lines_remaining -= 1
+
+ if self._range_start:
+ if self.is_complete:
+ range_end = self._next_line_number
+ elif line.startswith(' '):
+ range_end = self._next_line_number - 1
+ else:
+ range_end = 0
+
+ if range_end:
+ self.ranges.append((self._range_start, range_end))
+ self._range_start = 0
+
+ self._next_line_number += 1
+
+ @property
+ def is_complete(self) -> bool:
+ """True if the diff is complete, otherwise False."""
+ return self._lines_remaining == 0
+
+ def format_lines(self, context: bool = True) -> list[str]:
+ """Format the diff and return a list of lines, optionally including context."""
+ if context:
+ lines = self.lines_and_context
+ else:
+ lines = self.lines
+
+ return ['%s:%4d %s' % (self.path, line[0], line[1]) for line in lines]
+
+
+class DiffParser:
+ """Parse diff lines."""
+ def __init__(self, lines: list[str]) -> None:
+ self.lines = lines
+ self.files: list[FileDiff] = []
+
+ self.action = self.process_start
+ self.line_number = 0
+ self.previous_line: t.Optional[str] = None
+ self.line: t.Optional[str] = None
+ self.file: t.Optional[FileDiff] = None
+
+ for self.line in self.lines:
+ self.line_number += 1
+
+ try:
+ self.action()
+ except Exception as ex:
+ message = textwrap.dedent('''
+ %s
+
+ Line: %d
+ Previous: %s
+ Current: %s
+ %s
+ ''').strip() % (
+ ex,
+ self.line_number,
+ self.previous_line or '',
+ self.line or '',
+ traceback.format_exc(),
+ )
+
+ raise ApplicationError(message.strip())
+
+ self.previous_line = self.line
+
+ self.complete_file()
+
+ def process_start(self) -> None:
+ """Process a diff start line."""
+ self.complete_file()
+
+ match = re.search(r'^diff --git "?(?:a/)?(?P<old_path>.*)"? "?(?:b/)?(?P<new_path>.*)"?$', self.line)
+
+ if not match:
+ raise Exception('Unexpected diff start line.')
+
+ self.file = FileDiff(match.group('old_path'), match.group('new_path'))
+ self.action = self.process_continue
+
+ def process_range(self) -> None:
+ """Process a diff range line."""
+ match = re.search(r'^@@ -((?P<old_start>[0-9]+),)?(?P<old_count>[0-9]+) \+((?P<new_start>[0-9]+),)?(?P<new_count>[0-9]+) @@', self.line)
+
+ if not match:
+ raise Exception('Unexpected diff range line.')
+
+ self.file.old.set_start(int(match.group('old_start') or 1), int(match.group('old_count')))
+ self.file.new.set_start(int(match.group('new_start') or 1), int(match.group('new_count')))
+ self.action = self.process_content
+
+ def process_continue(self) -> None:
+ """Process a diff start, range or header line."""
+ if self.line.startswith('diff '):
+ self.process_start()
+ elif self.line.startswith('@@ '):
+ self.process_range()
+ else:
+ self.process_header()
+
+ def process_header(self) -> None:
+ """Process a diff header line."""
+ if self.line.startswith('Binary files '):
+ self.file.binary = True
+ elif self.line == '--- /dev/null':
+ self.file.old.exists = False
+ elif self.line == '+++ /dev/null':
+ self.file.new.exists = False
+ else:
+ self.file.append_header(self.line)
+
+ def process_content(self) -> None:
+ """Process a diff content line."""
+ if self.line == r'\ No newline at end of file':
+ if self.previous_line.startswith(' '):
+ self.file.old.eof_newline = False
+ self.file.new.eof_newline = False
+ elif self.previous_line.startswith('-'):
+ self.file.old.eof_newline = False
+ elif self.previous_line.startswith('+'):
+ self.file.new.eof_newline = False
+ else:
+ raise Exception('Unexpected previous diff content line.')
+
+ return
+
+ if self.file.is_complete:
+ self.process_continue()
+ return
+
+ if self.line.startswith(' '):
+ self.file.old.append(self.line)
+ self.file.new.append(self.line)
+ elif self.line.startswith('-'):
+ self.file.old.append(self.line)
+ elif self.line.startswith('+'):
+ self.file.new.append(self.line)
+ else:
+ raise Exception('Unexpected diff content line.')
+
+ def complete_file(self) -> None:
+ """Complete processing of the current file, if any."""
+ if not self.file:
+ return
+
+ self.files.append(self.file)
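A quick round trip through the parser above, assuming the module is importable as shown; the input mirrors the `git diff` output format it handles:

    diff_text = (
        'diff --git a/hello.txt b/hello.txt\n'
        '--- a/hello.txt\n'
        '+++ b/hello.txt\n'
        '@@ -1,2 +1,2 @@\n'
        ' unchanged line\n'
        '-old line\n'
        '+new line'
    )

    files = parse_diff(diff_text.splitlines())

    assert files[0].old.path == 'hello.txt' and files[0].new.path == 'hello.txt'
    assert files[0].new.lines == [(2, '+new line')]  # (line number, raw line)
    assert files[0].is_complete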
diff --git a/test/lib/ansible_test/_internal/docker_util.py b/test/lib/ansible_test/_internal/docker_util.py
new file mode 100644
index 0000000..6c38ddb
--- /dev/null
+++ b/test/lib/ansible_test/_internal/docker_util.py
@@ -0,0 +1,1005 @@
+"""Functions for accessing docker via the docker cli."""
+from __future__ import annotations
+
+import dataclasses
+import enum
+import json
+import os
+import pathlib
+import re
+import socket
+import time
+import urllib.parse
+import typing as t
+
+from .util import (
+ ApplicationError,
+ common_environment,
+ display,
+ find_executable,
+ SubprocessError,
+ cache,
+ OutputStream,
+)
+
+from .util_common import (
+ run_command,
+ raw_command,
+)
+
+from .config import (
+ CommonConfig,
+)
+
+from .thread import (
+ mutex,
+ named_lock,
+)
+
+from .cgroup import (
+ CGroupEntry,
+ MountEntry,
+ MountType,
+)
+
+DOCKER_COMMANDS = [
+ 'docker',
+ 'podman',
+]
+
+UTILITY_IMAGE = 'quay.io/ansible/ansible-test-utility-container:2.0.0'
+
+# Max number of open files in a docker container.
+# Passed with --ulimit option to the docker run command.
+MAX_NUM_OPEN_FILES = 10240
+
+# The value of /proc/*/loginuid when it is not set.
+# It is a reserved UID, which is the maximum 32-bit unsigned integer value.
+# See: https://access.redhat.com/solutions/25404
+LOGINUID_NOT_SET = 4294967295
+
+
+class DockerInfo:
+ """The results of `docker info` and `docker version` for the container runtime."""
+
+ @classmethod
+ def init(cls, args: CommonConfig) -> DockerInfo:
+ """Initialize and return a DockerInfo instance."""
+ command = require_docker().command
+
+ info_stdout = docker_command(args, ['info', '--format', '{{ json . }}'], capture=True, always=True)[0]
+ info = json.loads(info_stdout)
+
+ if server_errors := info.get('ServerErrors'):
+ # This can occur when a remote docker instance is in use and the instance is not responding, such as when the system is still starting up.
+ # In that case an error such as the following may be returned:
+ # error during connect: Get "http://{hostname}:2375/v1.24/info": dial tcp {ip_address}:2375: connect: no route to host
+ raise ApplicationError('Unable to get container host information: ' + '\n'.join(server_errors))
+
+ version_stdout = docker_command(args, ['version', '--format', '{{ json . }}'], capture=True, always=True)[0]
+ version = json.loads(version_stdout)
+
+ info = DockerInfo(args, command, info, version)
+
+ return info
+
+ def __init__(self, args: CommonConfig, engine: str, info: dict[str, t.Any], version: dict[str, t.Any]) -> None:
+ self.args = args
+ self.engine = engine
+ self.info = info
+ self.version = version
+
+ @property
+ def client(self) -> dict[str, t.Any]:
+ """The client version details."""
+ client = self.version.get('Client')
+
+ if not client:
+ raise ApplicationError('Unable to get container host client information.')
+
+ return client
+
+ @property
+ def server(self) -> dict[str, t.Any]:
+ """The server version details."""
+ server = self.version.get('Server')
+
+ if not server:
+ if self.engine == 'podman':
+ # Some Podman versions always report server version info (verified with 1.8.0 and 1.9.3).
+ # Others do not unless Podman remote is being used.
+ # To provide consistency, use the client version if the server version isn't provided.
+ # See: https://github.com/containers/podman/issues/2671#issuecomment-804382934
+ return self.client
+
+ raise ApplicationError('Unable to get container host server information.')
+
+ return server
+
+ @property
+ def client_version(self) -> str:
+ """The client version."""
+ return self.client['Version']
+
+ @property
+ def server_version(self) -> str:
+ """The server version."""
+ return self.server['Version']
+
+ @property
+ def client_major_minor_version(self) -> tuple[int, int]:
+ """The client major and minor version."""
+ major, minor = self.client_version.split('.')[:2]
+ return int(major), int(minor)
+
+ @property
+ def server_major_minor_version(self) -> tuple[int, int]:
+ """The server major and minor version."""
+ major, minor = self.server_version.split('.')[:2]
+ return int(major), int(minor)
+
+ @property
+ def cgroupns_option_supported(self) -> bool:
+ """Return True if the `--cgroupns` option is supported, otherwise return False."""
+ if self.engine == 'docker':
+ # Docker added support for the `--cgroupns` option in version 20.10.
+ # Both the client and server must support the option to use it.
+ # See: https://docs.docker.com/engine/release-notes/#20100
+ return self.client_major_minor_version >= (20, 10) and self.server_major_minor_version >= (20, 10)
+
+ raise NotImplementedError(self.engine)
+
+ @property
+ def cgroup_version(self) -> int:
+ """The cgroup version of the container host."""
+ info = self.info
+ host = info.get('host')
+
+ # When the container host reports cgroup v1 it is running either cgroup v1 legacy mode or cgroup v2 hybrid mode.
+ # When the container host reports cgroup v2 it is running under cgroup v2 unified mode.
+ # See: https://github.com/containers/podman/blob/8356621249e36ed62fc7f35f12d17db9027ff076/libpod/info_linux.go#L52-L56
+ # See: https://github.com/moby/moby/blob/d082bbcc0557ec667faca81b8b33bec380b75dac/daemon/info_unix.go#L24-L27
+
+ if host:
+ return int(host['cgroupVersion'].lstrip('v')) # podman
+
+ try:
+ return int(info['CgroupVersion']) # docker
+ except KeyError:
+ pass
+
+ # Docker 20.10 (API version 1.41) added support for cgroup v2.
+ # Unfortunately the client or server is too old to report the cgroup version.
+ # If the server is old, we can infer the cgroup version.
+ # Otherwise, we'll need to fall back to detection.
+ # See: https://docs.docker.com/engine/release-notes/#20100
+ # See: https://docs.docker.com/engine/api/version-history/#v141-api-changes
+
+ if self.server_major_minor_version < (20, 10):
+ return 1 # old docker server with only cgroup v1 support
+
+ # Tell the user what versions they have and recommend they upgrade the client.
+ # Downgrading the server should also work, but we won't mention that.
+ message = (
+ f'The Docker client version is {self.client_version}. '
+ f'The Docker server version is {self.server_version}. '
+ 'Upgrade your Docker client to version 20.10 or later.'
+ )
+
+ if detect_host_properties(self.args).cgroup_v2:
+ # Unfortunately cgroup v2 was detected on the Docker server.
+ # A newer client is needed to support the `--cgroupns` option for use with cgroup v2.
+ raise ApplicationError(f'Unsupported Docker client and server combination using cgroup v2. {message}')
+
+ display.warning(f'Detected Docker server cgroup v1 using probing. {message}', unique=True)
+
+ return 1 # docker server is using cgroup v1 (or cgroup v2 hybrid)
+
+ @property
+ def docker_desktop_wsl2(self) -> bool:
+ """Return True if Docker Desktop integrated with WSL2 is detected, otherwise False."""
+ info = self.info
+
+ kernel_version = info.get('KernelVersion')
+ operating_system = info.get('OperatingSystem')
+
+ dd_wsl2 = kernel_version and kernel_version.endswith('-WSL2') and operating_system == 'Docker Desktop'
+
+ return dd_wsl2
+
+ @property
+ def description(self) -> str:
+ """Describe the container runtime."""
+ tags = dict(
+ client=self.client_version,
+ server=self.server_version,
+ cgroup=f'v{self.cgroup_version}',
+ )
+
+ labels = [self.engine] + [f'{key}={value}' for key, value in tags.items()]
+
+ if self.docker_desktop_wsl2:
+ labels.append('DD+WSL2')
+
+ return f'Container runtime: {" ".join(labels)}'
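The major/minor comparisons above deliberately ignore the patch component of the version. A standalone illustration with hypothetical version strings:

    def major_minor(version: str) -> tuple[int, int]:
        """Parse 'major.minor[.patch]' into a comparable tuple, as the properties above do."""
        major, minor = version.split('.')[:2]
        return int(major), int(minor)

    assert major_minor('20.10.17') >= (20, 10)  # supports --cgroupns
    assert major_minor('19.03.8') < (20, 10)    # predates --cgroupns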
+
+
+@mutex
+def get_docker_info(args: CommonConfig) -> DockerInfo:
+ """Return info for the current container runtime. The results are cached."""
+ try:
+ return get_docker_info.info # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+
+ info = DockerInfo.init(args)
+
+ display.info(info.description, verbosity=1)
+
+ get_docker_info.info = info # type: ignore[attr-defined]
+
+ return info
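get_docker_info() uses the same memoization idiom as detect_host_properties() and docker_image_inspect() below: the result is stashed as an attribute on the function object, with @mutex serializing the first computation. A minimal sketch of the pattern in isolation:

    def expensive() -> dict:
        """Compute once, then return the cached result on later calls."""
        try:
            return expensive.result  # type: ignore[attr-defined]
        except AttributeError:
            pass

        expensive.result = {'computed': True}  # type: ignore[attr-defined]
        return expensive.result

    assert expensive() is expensive()  # the same object both times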
+
+
+class SystemdControlGroupV1Status(enum.Enum):
+ """The state of the cgroup v1 systemd hierarchy on the container host."""
+ SUBSYSTEM_MISSING = 'The systemd cgroup subsystem was not found.'
+ FILESYSTEM_NOT_MOUNTED = 'The "/sys/fs/cgroup/systemd" filesystem is not mounted.'
+ MOUNT_TYPE_NOT_CORRECT = 'The "/sys/fs/cgroup/systemd" mount type is not correct.'
+ VALID = 'The "/sys/fs/cgroup/systemd" mount is valid.'
+
+
+@dataclasses.dataclass(frozen=True)
+class ContainerHostProperties:
+ """Container host properties detected at run time."""
+ audit_code: str
+ max_open_files: int
+ loginuid: t.Optional[int]
+ cgroup_v1: SystemdControlGroupV1Status
+ cgroup_v2: bool
+
+
+@mutex
+def detect_host_properties(args: CommonConfig) -> ContainerHostProperties:
+ """
+ Detect and return properties of the container host.
+
+ The information collected is:
+
+ - The errno result from attempting to query the container host's audit status.
+ - The max number of open files supported by the container host to run containers.
+ This value may be capped to the maximum value used by ansible-test.
+ If the value is below the desired limit, a warning is displayed.
+ - The loginuid used by the container host to run containers, or None if the audit subsystem is unavailable.
+ - The cgroup subsystems registered with the Linux kernel.
+ - The mounts visible within a container.
+ - The status of the systemd cgroup v1 hierarchy.
+
+ This information is collected together to reduce the number of container runs to probe the container host.
+ """
+ try:
+ return detect_host_properties.properties # type: ignore[attr-defined]
+ except AttributeError:
+ pass
+
+ single_line_commands = (
+ 'audit-status',
+ 'cat /proc/sys/fs/nr_open',
+ 'ulimit -Hn',
+ '(cat /proc/1/loginuid; echo)',
+ )
+
+ multi_line_commands = (
+ ' && '.join(single_line_commands),
+ 'cat /proc/1/cgroup',
+ 'cat /proc/1/mountinfo',
+ )
+
+ options = ['--volume', '/sys/fs/cgroup:/probe:ro']
+ cmd = ['sh', '-c', ' && echo "-" && '.join(multi_line_commands)]
+
+ stdout = run_utility_container(args, f'ansible-test-probe-{args.session_name}', cmd, options)[0]
+
+ if args.explain:
+ return ContainerHostProperties(
+ audit_code='???',
+ max_open_files=MAX_NUM_OPEN_FILES,
+ loginuid=LOGINUID_NOT_SET,
+ cgroup_v1=SystemdControlGroupV1Status.VALID,
+ cgroup_v2=False,
+ )
+
+ blocks = stdout.split('\n-\n')
+
+ values = blocks[0].split('\n')
+
+ audit_parts = values[0].split(' ', 1)
+ audit_status = int(audit_parts[0])
+ audit_code = audit_parts[1]
+
+ system_limit = int(values[1])
+ hard_limit = int(values[2])
+ loginuid = int(values[3]) if values[3] else None
+
+ cgroups = CGroupEntry.loads(blocks[1])
+ mounts = MountEntry.loads(blocks[2])
+
+ if hard_limit < MAX_NUM_OPEN_FILES and hard_limit < system_limit and require_docker().command == 'docker':
+ # Podman will use the highest possible limits, up to its default of 1M.
+ # See: https://github.com/containers/podman/blob/009afb50b308548eb129bc68e654db6c6ad82e7a/pkg/specgen/generate/oci.go#L39-L58
+ # Docker limits are less predictable. They could be the system limit or the user's soft limit.
+ # If Docker is running as root it should be able to use the system limit.
+ # When Docker reports a limit below the preferred value and the system limit, attempt to use the preferred value, up to the system limit.
+ options = ['--ulimit', f'nofile={min(system_limit, MAX_NUM_OPEN_FILES)}']
+ cmd = ['sh', '-c', 'ulimit -Hn']
+
+ try:
+ stdout = run_utility_container(args, f'ansible-test-ulimit-{args.session_name}', cmd, options)[0]
+ except SubprocessError as ex:
+ display.warning(str(ex))
+ else:
+ hard_limit = int(stdout)
+
+ # Check the audit error code from attempting to query the container host's audit status.
+ #
+ # The following error codes are known to occur:
+ #
+ # EPERM - Operation not permitted
+ # This occurs when the root user runs a container but lacks the AUDIT_WRITE capability.
+ # This will cause patched versions of OpenSSH to disconnect after a login succeeds.
+ # See: https://src.fedoraproject.org/rpms/openssh/blob/f36/f/openssh-7.6p1-audit.patch
+ #
+ # EBADF - Bad file number
+ # This occurs when the host doesn't support the audit system (the open_audit call fails).
+ # This allows SSH logins to succeed despite the failure.
+ # See: https://github.com/Distrotech/libaudit/blob/4fc64f79c2a7f36e3ab7b943ce33ab5b013a7782/lib/netlink.c#L204-L209
+ #
+ # ECONNREFUSED - Connection refused
+ # This occurs when a non-root user runs a container without the AUDIT_WRITE capability.
+ # When sending an audit message, libaudit ignores this error condition.
+ # This allows SSH logins to succeed despite the failure.
+ # See: https://github.com/Distrotech/libaudit/blob/4fc64f79c2a7f36e3ab7b943ce33ab5b013a7782/lib/deprecated.c#L48-L52
+
+ subsystems = set(cgroup.subsystem for cgroup in cgroups)
+ mount_types = {mount.path: mount.type for mount in mounts}
+
+ if 'systemd' not in subsystems:
+ cgroup_v1 = SystemdControlGroupV1Status.SUBSYSTEM_MISSING
+ elif not (mount_type := mount_types.get(pathlib.PurePosixPath('/probe/systemd'))):
+ cgroup_v1 = SystemdControlGroupV1Status.FILESYSTEM_NOT_MOUNTED
+ elif mount_type != MountType.CGROUP_V1:
+ cgroup_v1 = SystemdControlGroupV1Status.MOUNT_TYPE_NOT_CORRECT
+ else:
+ cgroup_v1 = SystemdControlGroupV1Status.VALID
+
+ cgroup_v2 = mount_types.get(pathlib.PurePosixPath('/probe')) == MountType.CGROUP_V2
+
+ display.info(f'Container host audit status: {audit_code} ({audit_status})', verbosity=1)
+ display.info(f'Container host max open files: {hard_limit}', verbosity=1)
+ display.info(f'Container loginuid: {loginuid if loginuid is not None else "unavailable"}'
+ f'{" (not set)" if loginuid == LOGINUID_NOT_SET else ""}', verbosity=1)
+
+ if hard_limit < MAX_NUM_OPEN_FILES:
+ display.warning(f'Unable to set container max open files to {MAX_NUM_OPEN_FILES}. Using container host limit of {hard_limit} instead.')
+ else:
+ hard_limit = MAX_NUM_OPEN_FILES
+
+ properties = ContainerHostProperties(
+ # The errno (audit_status) is intentionally not exposed here, as it can vary across systems and architectures.
+ # Instead, the symbolic name (audit_code) is used, which is resolved inside the container which generated the error.
+ # See: https://man7.org/linux/man-pages/man3/errno.3.html
+ audit_code=audit_code,
+ max_open_files=hard_limit,
+ loginuid=loginuid,
+ cgroup_v1=cgroup_v1,
+ cgroup_v2=cgroup_v2,
+ )
+
+ detect_host_properties.properties = properties # type: ignore[attr-defined]
+
+ return properties
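The comment block above keeps the symbolic audit code because raw errno values are not portable across platforms. For example (values shown are common assignments; illustrative only):

    import errno

    # EPERM and EBADF are 1 and 9 on Linux, but ECONNREFUSED is 111 on Linux
    # and 61 on BSD/macOS, hence the symbolic name resolved in the container.
    print(errno.EPERM, errno.EBADF, errno.ECONNREFUSED)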
+
+
+def run_utility_container(
+ args: CommonConfig,
+ name: str,
+ cmd: list[str],
+ options: list[str],
+ data: t.Optional[str] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run the specified command using the ansible-test utility container, returning stdout and stderr."""
+ options = options + [
+ '--name', name,
+ '--rm',
+ ]
+
+ if data:
+ options.append('-i')
+
+ docker_pull(args, UTILITY_IMAGE)
+
+ return docker_run(args, UTILITY_IMAGE, options, cmd, data)
+
+
+class DockerCommand:
+ """Details about the available docker command."""
+ def __init__(self, command: str, executable: str, version: str) -> None:
+ self.command = command
+ self.executable = executable
+ self.version = version
+
+ @staticmethod
+ def detect() -> t.Optional[DockerCommand]:
+ """Detect and return the available docker command, or None."""
+ if os.environ.get('ANSIBLE_TEST_PREFER_PODMAN'):
+ commands = list(reversed(DOCKER_COMMANDS))
+ else:
+ commands = DOCKER_COMMANDS
+
+ for command in commands:
+ executable = find_executable(command, required=False)
+
+ if executable:
+ version = raw_command([command, '-v'], env=docker_environment(), capture=True)[0].strip()
+
+ if command == 'docker' and 'podman' in version:
+ continue # avoid detecting podman as docker
+
+ display.info('Detected "%s" container runtime version: %s' % (command, version), verbosity=1)
+
+ return DockerCommand(command, executable, version)
+
+ return None
+
+
+def require_docker() -> DockerCommand:
+ """Return the docker command to invoke. Raises an exception if docker is not available."""
+ if command := get_docker_command():
+ return command
+
+ raise ApplicationError(f'No container runtime detected. Supported commands: {", ".join(DOCKER_COMMANDS)}')
+
+
+@cache
+def get_docker_command() -> t.Optional[DockerCommand]:
+ """Return the docker command to invoke, or None if docker is not available."""
+ return DockerCommand.detect()
+
+
+def docker_available() -> bool:
+ """Return True if docker is available, otherwise return False."""
+ return bool(get_docker_command())
+
+
+@cache
+def get_docker_host_ip() -> str:
+ """Return the IP of the Docker host."""
+ docker_host_ip = socket.gethostbyname(get_docker_hostname())
+
+ display.info('Detected docker host IP: %s' % docker_host_ip, verbosity=1)
+
+ return docker_host_ip
+
+
+@cache
+def get_docker_hostname() -> str:
+ """Return the hostname of the Docker service."""
+ docker_host = os.environ.get('DOCKER_HOST')
+
+ if docker_host and docker_host.startswith('tcp://'):
+ try:
+ hostname = urllib.parse.urlparse(docker_host)[1].split(':')[0]
+ display.info('Detected Docker host: %s' % hostname, verbosity=1)
+ except ValueError:
+ hostname = 'localhost'
+ display.warning('Could not parse DOCKER_HOST environment variable "%s", falling back to localhost.' % docker_host)
+ else:
+ hostname = 'localhost'
+ display.info('Assuming Docker is available on localhost.', verbosity=1)
+
+ return hostname
+
+
+@cache
+def get_podman_host_ip() -> str:
+ """Return the IP of the Podman host."""
+ podman_host_ip = socket.gethostbyname(get_podman_hostname())
+
+ display.info('Detected Podman host IP: %s' % podman_host_ip, verbosity=1)
+
+ return podman_host_ip
+
+
+@cache
+def get_podman_default_hostname() -> t.Optional[str]:
+ """
+ Return the default hostname of the Podman service.
+
+ The --format option was added in podman 3.3.0, so this functionality depends on its availability.
+ """
+ hostname: t.Optional[str] = None
+ try:
+ stdout = raw_command(['podman', 'system', 'connection', 'list', '--format=json'], env=docker_environment(), capture=True)[0]
+ except SubprocessError:
+ stdout = '[]'
+
+ try:
+ connections = json.loads(stdout)
+ except json.decoder.JSONDecodeError:
+ return hostname
+
+ for connection in connections:
+ # A trailing '*' in the connection name indicates the default connection.
+ if connection['Name'][-1] == '*':
+ hostname = connection['URI']
+ break
+
+ return hostname
+
+
+@cache
+def get_podman_remote() -> t.Optional[str]:
+ """Return the remote podman hostname, if any, otherwise return None."""
+ # URL value resolution precedence:
+ # - command line value
+ # - environment variable CONTAINER_HOST
+ # - containers.conf
+ # - unix://run/podman/podman.sock
+ hostname = None
+
+ podman_host = os.environ.get('CONTAINER_HOST')
+ if not podman_host:
+ podman_host = get_podman_default_hostname()
+
+ if podman_host and podman_host.startswith('ssh://'):
+ try:
+ hostname = urllib.parse.urlparse(podman_host).hostname
+ except ValueError:
+ display.warning('Could not parse podman URI "%s"' % podman_host)
+ else:
+ display.info('Detected Podman remote: %s' % hostname, verbosity=1)
+ return hostname
+
+
+@cache
+def get_podman_hostname() -> str:
+ """Return the hostname of the Podman service."""
+ hostname = get_podman_remote()
+
+ if not hostname:
+ hostname = 'localhost'
+ display.info('Assuming Podman is available on localhost.', verbosity=1)
+
+ return hostname
+
+
+@cache
+def get_docker_container_id() -> t.Optional[str]:
+ """Return the current container ID if running in a container, otherwise return None."""
+ mountinfo_path = pathlib.Path('/proc/self/mountinfo')
+ container_id = None
+ engine = None
+
+ if mountinfo_path.is_file():
+ # NOTE: This method of detecting the container engine and container ID relies on implementation details of each container engine.
+ # Although the implementation details have remained unchanged for some time, there is no guarantee they will continue to work.
+ # There have been proposals to create a standard mechanism for this, but none is currently available.
+ # See: https://github.com/opencontainers/runtime-spec/issues/1105
+
+ mounts = MountEntry.loads(mountinfo_path.read_text())
+
+ for mount in mounts:
+ if str(mount.path) == '/etc/hostname':
+ # Podman generates /etc/hostname in the makePlatformBindMounts function.
+ # That function ends up using ContainerRunDirectory to generate a path like: {prefix}/{container_id}/userdata/hostname
+ # NOTE: The {prefix} portion of the path can vary, so should not be relied upon.
+ # See: https://github.com/containers/podman/blob/480c7fbf5361f3bd8c1ed81fe4b9910c5c73b186/libpod/container_internal_linux.go#L660-L664
+ # See: https://github.com/containers/podman/blob/480c7fbf5361f3bd8c1ed81fe4b9910c5c73b186/vendor/github.com/containers/storage/store.go#L3133
+ # This behavior has existed for ~5 years and was present in Podman version 0.2.
+ # See: https://github.com/containers/podman/pull/248
+ if match := re.search('/(?P<id>[0-9a-f]{64})/userdata/hostname$', str(mount.root)):
+ container_id = match.group('id')
+ engine = 'Podman'
+ break
+
+ # Docker generates /etc/hostname in the BuildHostnameFile function.
+ # That function ends up using the containerRoot function to generate a path like: {prefix}/{container_id}/hostname
+ # NOTE: The {prefix} portion of the path can vary, so should not be relied upon.
+ # See: https://github.com/moby/moby/blob/cd8a090e6755bee0bdd54ac8a894b15881787097/container/container_unix.go#L58
+ # See: https://github.com/moby/moby/blob/92e954a2f05998dc05773b6c64bbe23b188cb3a0/daemon/container.go#L86
+ # This behavior has existed for at least ~7 years and was present in Docker version 1.0.1.
+ # See: https://github.com/moby/moby/blob/v1.0.1/daemon/container.go#L351
+ # See: https://github.com/moby/moby/blob/v1.0.1/daemon/daemon.go#L133
+ if match := re.search('/(?P<id>[0-9a-f]{64})/hostname$', str(mount.root)):
+ container_id = match.group('id')
+ engine = 'Docker'
+ break
+
+ if container_id:
+ display.info(f'Detected execution in {engine} container ID: {container_id}', verbosity=1)
+
+ return container_id
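A sketch of the mount-root heuristic documented above, using a hypothetical 64-hex-digit container ID (the path prefix varies by host, as noted, so the one below is illustrative only):

    import re

    container_id = 'a' * 64  # hypothetical ID; real ones are 64 hex digits
    podman_root = f'/containers/storage/overlay-containers/{container_id}/userdata/hostname'

    match = re.search('/(?P<id>[0-9a-f]{64})/userdata/hostname$', podman_root)
    assert match and match.group('id') == container_id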
+
+
+def docker_pull(args: CommonConfig, image: str) -> None:
+ """
+ Pull the specified image if it is not available.
+ Images without a tag or digest will not be pulled.
+ Retries the pull if it fails, making up to 9 attempts in total.
+ A warning will be shown for any image with volumes defined.
+ Images will be pulled only once.
+ Concurrent pulls for the same image will block until the first completes.
+ """
+ with named_lock(f'docker_pull:{image}') as first:
+ if first:
+ __docker_pull(args, image)
+
+
+def __docker_pull(args: CommonConfig, image: str) -> None:
+ """Internal implementation for docker_pull. Do not call directly."""
+ if '@' not in image and ':' not in image:
+ display.info('Skipping pull of image without tag or digest: %s' % image, verbosity=2)
+ inspect = docker_image_inspect(args, image)
+ elif inspect := docker_image_inspect(args, image, always=True):
+ display.info('Skipping pull of existing image: %s' % image, verbosity=2)
+ else:
+ for _iteration in range(1, 10):
+ try:
+ docker_command(args, ['pull', image], capture=False)
+
+ if (inspect := docker_image_inspect(args, image)) or args.explain:
+ break
+
+ display.warning(f'Image "{image}" not found after pull completed. Waiting a few seconds before trying again.')
+ except SubprocessError:
+ display.warning(f'Failed to pull container image "{image}". Waiting a few seconds before trying again.')
+ time.sleep(3)
+ else:
+ raise ApplicationError(f'Failed to pull container image "{image}".')
+
+ if inspect and inspect.volumes:
+ display.warning(f'Image "{image}" contains {len(inspect.volumes)} volume(s): {", ".join(sorted(inspect.volumes))}\n'
+ 'This may result in leaking anonymous volumes. It may also prevent the image from working on some hosts or container engines.\n'
+ 'The image should be rebuilt without the use of the VOLUME instruction.',
+ unique=True)
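The retry loop above relies on Python's for/else semantics: the else clause runs only when the loop exhausts every iteration without a break, which is exactly the "all attempts failed" case. A minimal demonstration:

    for attempt in range(3):
        if attempt == 1:
            break  # the success path skips the else clause
    else:
        raise RuntimeError('only reached when no attempt succeeded')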
+
+
+def docker_cp_to(args: CommonConfig, container_id: str, src: str, dst: str) -> None:
+ """Copy a file to the specified container."""
+ docker_command(args, ['cp', src, '%s:%s' % (container_id, dst)], capture=True)
+
+
+def docker_create(
+ args: CommonConfig,
+ image: str,
+ options: list[str],
+ cmd: t.Optional[list[str]] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Create a container using the given docker image."""
+ return docker_command(args, ['create'] + options + [image] + cmd, capture=True)
+
+
+def docker_run(
+ args: CommonConfig,
+ image: str,
+ options: list[str],
+ cmd: t.Optional[list[str]] = None,
+ data: t.Optional[str] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run a container using the given docker image."""
+ return docker_command(args, ['run'] + options + [image] + cmd, data=data, capture=True)
+
+
+def docker_start(
+ args: CommonConfig,
+ container_id: str,
+ options: list[str],
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Start a container by name or ID."""
+ return docker_command(args, ['start'] + options + [container_id], capture=True)
+
+
+def docker_rm(args: CommonConfig, container_id: str) -> None:
+ """Remove the specified container."""
+ try:
+ # Stop the container with SIGKILL immediately, then remove the container.
+ # Podman supports the `--time` option on `rm`, but only since version 4.0.0.
+ # Docker does not support the `--time` option on `rm`.
+ docker_command(args, ['stop', '--time', '0', container_id], capture=True)
+ docker_command(args, ['rm', container_id], capture=True)
+ except SubprocessError as ex:
+ # Both Podman and Docker report an error if the container does not exist.
+ # The error messages contain the same "no such container" string, differing only in capitalization.
+ if 'no such container' not in ex.stderr.lower():
+ raise ex
+
+
+class DockerError(Exception):
+ """General Docker error."""
+
+
+class ContainerNotFoundError(DockerError):
+ """The container identified by `identifier` was not found."""
+ def __init__(self, identifier: str) -> None:
+ super().__init__('The container "%s" was not found.' % identifier)
+
+ self.identifier = identifier
+
+
+class DockerInspect:
+ """The results of `docker inspect` for a single container."""
+ def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None:
+ self.args = args
+ self.inspection = inspection
+
+ # primary properties
+
+ @property
+ def id(self) -> str:
+ """Return the ID of the container."""
+ return self.inspection['Id']
+
+ @property
+ def network_settings(self) -> dict[str, t.Any]:
+ """Return a dictionary of the container network settings."""
+ return self.inspection['NetworkSettings']
+
+ @property
+ def state(self) -> dict[str, t.Any]:
+ """Return a dictionary of the container state."""
+ return self.inspection['State']
+
+ @property
+ def config(self) -> dict[str, t.Any]:
+ """Return a dictionary of the container configuration."""
+ return self.inspection['Config']
+
+ # nested properties
+
+ @property
+ def ports(self) -> dict[str, list[dict[str, str]]]:
+ """Return a dictionary of ports the container has published."""
+ return self.network_settings['Ports']
+
+ @property
+ def networks(self) -> t.Optional[dict[str, dict[str, t.Any]]]:
+ """Return a dictionary of the networks the container is attached to, or None if running under podman, which does not support networks."""
+ return self.network_settings.get('Networks')
+
+ @property
+ def running(self) -> bool:
+ """Return True if the container is running, otherwise False."""
+ return self.state['Running']
+
+ @property
+ def pid(self) -> int:
+ """Return the PID of the init process."""
+ if self.args.explain:
+ return 0
+
+ return self.state['Pid']
+
+ @property
+ def env(self) -> list[str]:
+ """Return a list of the environment variables used to create the container."""
+ return self.config['Env']
+
+ @property
+ def image(self) -> str:
+ """Return the image used to create the container."""
+ return self.config['Image']
+
+ # functions
+
+ def env_dict(self) -> dict[str, str]:
+ """Return a dictionary of the environment variables used to create the container."""
+ return dict(e.split('=', 1) for e in self.env)
+
+ def get_tcp_port(self, port: int) -> t.Optional[list[dict[str, str]]]:
+ """Return a list of the endpoints published by the container for the specified TCP port, or None if it is not published."""
+ return self.ports.get('%d/tcp' % port)
+
+ def get_network_names(self) -> t.Optional[list[str]]:
+ """Return a list of the network names the container is attached to."""
+ if self.networks is None:
+ return None
+
+ return sorted(self.networks)
+
+ def get_network_name(self) -> str:
+ """Return the network name the container is attached to. Raises an exception if no network, or more than one, is attached."""
+ networks = self.get_network_names()
+
+ if not networks:
+ raise ApplicationError('No network found for Docker container: %s.' % self.id)
+
+ if len(networks) > 1:
+ raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (self.id, ', '.join(networks)))
+
+ return networks[0]
+
+
+def docker_inspect(args: CommonConfig, identifier: str, always: bool = False) -> DockerInspect:
+ """
+ Return the results of `docker container inspect` for the specified container.
+ Raises a ContainerNotFoundError if the container was not found.
+ """
+ try:
+ stdout = docker_command(args, ['container', 'inspect', identifier], capture=True, always=always)[0]
+ except SubprocessError as ex:
+ stdout = ex.stdout
+
+ if args.explain and not always:
+ items = []
+ else:
+ items = json.loads(stdout)
+
+ if len(items) == 1:
+ return DockerInspect(args, items[0])
+
+ raise ContainerNotFoundError(identifier)
+
+
+def docker_network_disconnect(args: CommonConfig, container_id: str, network: str) -> None:
+ """Disconnect the specified docker container from the given network."""
+ docker_command(args, ['network', 'disconnect', network, container_id], capture=True)
+
+
+class DockerImageInspect:
+ """The results of `docker image inspect` for a single image."""
+ def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None:
+ self.args = args
+ self.inspection = inspection
+
+ # primary properties
+
+ @property
+ def config(self) -> dict[str, t.Any]:
+ """Return a dictionary of the image config."""
+ return self.inspection['Config']
+
+ # nested properties
+
+ @property
+ def volumes(self) -> dict[str, t.Any]:
+ """Return a dictionary of the image volumes."""
+ return self.config.get('Volumes') or {}
+
+ @property
+ def cmd(self) -> list[str]:
+ """The command to run when the container starts."""
+ return self.config['Cmd']
+
+
+@mutex
+def docker_image_inspect(args: CommonConfig, image: str, always: bool = False) -> t.Optional[DockerImageInspect]:
+ """
+ Return the results of `docker image inspect` for the specified image or None if the image does not exist.
+ """
+ inspect_cache: dict[str, DockerImageInspect]
+
+ try:
+ inspect_cache = docker_image_inspect.cache # type: ignore[attr-defined]
+ except AttributeError:
+ inspect_cache = docker_image_inspect.cache = {} # type: ignore[attr-defined]
+
+ if inspect_result := inspect_cache.get(image):
+ return inspect_result
+
+ try:
+ stdout = docker_command(args, ['image', 'inspect', image], capture=True, always=always)[0]
+ except SubprocessError:
+ stdout = '[]'
+
+ if args.explain and not always:
+ items = []
+ else:
+ items = json.loads(stdout)
+
+ if len(items) > 1:
+ raise ApplicationError(f'Inspection of image "{image}" resulted in {len(items)} items:\n{json.dumps(items, indent=4)}')
+
+ if len(items) == 1:
+ inspect_result = DockerImageInspect(args, items[0])
+ inspect_cache[image] = inspect_result
+ return inspect_result
+
+ return None
+
+
+class DockerNetworkInspect:
+ """The results of `docker network inspect` for a single network."""
+ def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None:
+ self.args = args
+ self.inspection = inspection
+
+
+def docker_network_inspect(args: CommonConfig, network: str, always: bool = False) -> t.Optional[DockerNetworkInspect]:
+ """
+ Return the results of `docker network inspect` for the specified network or None if the network does not exist.
+ """
+ try:
+ stdout = docker_command(args, ['network', 'inspect', network], capture=True, always=always)[0]
+ except SubprocessError:
+ stdout = '[]'
+
+ if args.explain and not always:
+ items = []
+ else:
+ items = json.loads(stdout)
+
+ if len(items) == 1:
+ return DockerNetworkInspect(args, items[0])
+
+ return None
+
+
+def docker_logs(args: CommonConfig, container_id: str) -> None:
+ """Display logs for the specified container. If an error occurs, it is displayed rather than raising an exception."""
+ try:
+ docker_command(args, ['logs', container_id], capture=False)
+ except SubprocessError as ex:
+ display.error(str(ex))
+
+
+def docker_exec(
+ args: CommonConfig,
+ container_id: str,
+ cmd: list[str],
+ capture: bool,
+ options: t.Optional[list[str]] = None,
+ stdin: t.Optional[t.IO[bytes]] = None,
+ stdout: t.Optional[t.IO[bytes]] = None,
+ interactive: bool = False,
+ output_stream: t.Optional[OutputStream] = None,
+ data: t.Optional[str] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Execute the given command in the specified container."""
+ if not options:
+ options = []
+
+ if data or stdin or stdout:
+ options.append('-i')
+
+ return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout, interactive=interactive,
+ output_stream=output_stream, data=data)
+
+
+def docker_command(
+ args: CommonConfig,
+ cmd: list[str],
+ capture: bool,
+ stdin: t.Optional[t.IO[bytes]] = None,
+ stdout: t.Optional[t.IO[bytes]] = None,
+ interactive: bool = False,
+ output_stream: t.Optional[OutputStream] = None,
+ always: bool = False,
+ data: t.Optional[str] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run the specified docker command."""
+ env = docker_environment()
+ command = [require_docker().command]
+
+ if command[0] == 'podman' and get_podman_remote():
+ command.append('--remote')
+
+ return run_command(args, command + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, interactive=interactive, always=always,
+ output_stream=output_stream, data=data)
+
+
+def docker_environment() -> dict[str, str]:
+ """Return a dictionary of docker related environment variables found in the current environment."""
+ env = common_environment()
+
+ var_names = {
+ 'XDG_RUNTIME_DIR', # podman
+ }
+
+ var_prefixes = {
+ 'CONTAINER_', # podman remote
+ 'DOCKER_', # docker
+ }
+
+ env.update({name: value for name, value in os.environ.items() if name in var_names or any(name.startswith(prefix) for prefix in var_prefixes)})
+
+ return env
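A quick check of the filtering predicate above against a hypothetical environment (names and values are illustrative only):

    candidates = {
        'DOCKER_HOST': 'tcp://127.0.0.1:2375',
        'CONTAINER_SSHKEY': '/tmp/key',
        'XDG_RUNTIME_DIR': '/run/user/1000',
        'HOME': '/root',
    }

    passed = {
        name for name in candidates
        if name in {'XDG_RUNTIME_DIR'} or name.startswith(('CONTAINER_', 'DOCKER_'))
    }

    assert passed == {'DOCKER_HOST', 'CONTAINER_SSHKEY', 'XDG_RUNTIME_DIR'}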
diff --git a/test/lib/ansible_test/_internal/encoding.py b/test/lib/ansible_test/_internal/encoding.py
new file mode 100644
index 0000000..11f0d75
--- /dev/null
+++ b/test/lib/ansible_test/_internal/encoding.py
@@ -0,0 +1,38 @@
+"""Functions for encoding and decoding strings."""
+from __future__ import annotations
+
+import typing as t
+
+ENCODING = 'utf-8'
+
+
+def to_optional_bytes(value: t.Optional[t.AnyStr], errors: str = 'strict') -> t.Optional[bytes]:
+ """Return the given value as bytes encoded using UTF-8 if not already bytes, or None if the value is None."""
+ return None if value is None else to_bytes(value, errors)
+
+
+def to_optional_text(value: t.Optional[t.AnyStr], errors: str = 'strict') -> t.Optional[str]:
+ """Return the given value as text decoded using UTF-8 if not already text, or None if the value is None."""
+ return None if value is None else to_text(value, errors)
+
+
+def to_bytes(value: t.AnyStr, errors: str = 'strict') -> bytes:
+ """Return the given value as bytes encoded using UTF-8 if not already bytes."""
+ if isinstance(value, bytes):
+ return value
+
+ if isinstance(value, str):
+ return value.encode(ENCODING, errors)
+
+ raise Exception('value is not bytes or text: %s' % type(value))
+
+
+def to_text(value: t.AnyStr, errors: str = 'strict') -> str:
+ """Return the given value as text decoded using UTF-8 if not already text."""
+ if isinstance(value, bytes):
+ return value.decode(ENCODING, errors)
+
+ if isinstance(value, str):
+ return value
+
+ raise Exception('value is not bytes or text: %s' % type(value))
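Round-trip behavior of the helpers above: values already in the target type pass through unchanged, and valid UTF-8 survives the encode/decode cycle intact:

    assert to_bytes('café') == b'caf\xc3\xa9'
    assert to_text(b'caf\xc3\xa9') == 'café'
    assert to_bytes(b'raw') == b'raw'      # already bytes: returned as-is
    assert to_optional_text(None) is None  # None passes through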
diff --git a/test/lib/ansible_test/_internal/executor.py b/test/lib/ansible_test/_internal/executor.py
new file mode 100644
index 0000000..0c94cf3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/executor.py
@@ -0,0 +1,115 @@
+"""Execute Ansible tests."""
+from __future__ import annotations
+
+import typing as t
+
+from .io import (
+ read_text_file,
+)
+
+from .util import (
+ ApplicationWarning,
+ display,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+from .classification import (
+ categorize_changes,
+)
+
+from .config import (
+ TestConfig,
+)
+
+from .metadata import (
+ ChangeDescription,
+)
+
+from .provisioning import (
+ HostState,
+)
+
+
+def get_changes_filter(args: TestConfig) -> list[str]:
+ """Return a list of targets which should be tested based on the changes made."""
+ paths = detect_changes(args)
+
+ if not args.metadata.change_description:
+ if paths:
+ changes = categorize_changes(args, paths, args.command)
+ else:
+ changes = ChangeDescription()
+
+ args.metadata.change_description = changes
+
+ if paths is None:
+ return [] # change detection not enabled, do not filter targets
+
+ if not paths:
+ raise NoChangesDetected()
+
+ if args.metadata.change_description.targets is None:
+ raise NoTestsForChanges()
+
+ return args.metadata.change_description.targets
+
+
+def detect_changes(args: TestConfig) -> t.Optional[list[str]]:
+ """Return a list of changed paths."""
+ if args.changed:
+ paths = get_ci_provider().detect_changes(args)
+ elif args.changed_from or args.changed_path:
+ paths = args.changed_path or []
+ if args.changed_from:
+ paths += read_text_file(args.changed_from).splitlines()
+ else:
+ return None # change detection not enabled
+
+ if paths is None:
+ return None # act as though change detection not enabled, do not filter targets
+
+ display.info('Detected changes in %d file(s).' % len(paths))
+
+ for path in paths:
+ display.info(path, verbosity=1)
+
+ return paths
+
+
+class NoChangesDetected(ApplicationWarning):
+ """Exception when change detection was performed, but no changes were found."""
+ def __init__(self) -> None:
+ super().__init__('No changes detected.')
+
+
+class NoTestsForChanges(ApplicationWarning):
+ """Exception when changes detected, but no tests trigger as a result."""
+ def __init__(self) -> None:
+ super().__init__('No tests found for detected changes.')
+
+
+class Delegate(Exception):
+ """Trigger command delegation."""
+ def __init__(self, host_state: HostState, exclude: t.Optional[list[str]] = None, require: t.Optional[list[str]] = None) -> None:
+ super().__init__()
+
+ self.host_state = host_state
+ self.exclude = exclude or []
+ self.require = require or []
+
+
+class ListTargets(Exception):
+ """List integration test targets instead of executing them."""
+ def __init__(self, target_names: list[str]) -> None:
+ super().__init__()
+
+ self.target_names = target_names
+
+
+class AllTargetsSkipped(ApplicationWarning):
+ """All targets skipped."""
+ def __init__(self) -> None:
+ super().__init__('All targets skipped.')
diff --git a/test/lib/ansible_test/_internal/git.py b/test/lib/ansible_test/_internal/git.py
new file mode 100644
index 0000000..c1909f0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/git.py
@@ -0,0 +1,102 @@
+"""Wrapper around git command-line tools."""
+from __future__ import annotations
+
+import re
+import typing as t
+
+from .util import (
+ SubprocessError,
+ raw_command,
+)
+
+
+class Git:
+ """Wrapper around git command-line tools."""
+ def __init__(self, root: t.Optional[str] = None) -> None:
+ self.git = 'git'
+ self.root = root
+
+ def get_diff(self, args: list[str], git_options: t.Optional[list[str]] = None) -> list[str]:
+ """Run `git diff` and return the result as a list."""
+ cmd = ['diff'] + args
+ if git_options is None:
+ git_options = ['-c', 'core.quotePath=']
+ return self.run_git_split(git_options + cmd, '\n', str_errors='replace')
+
+ def get_diff_names(self, args: list[str]) -> list[str]:
+ """Return a list of file names from the `git diff` command."""
+ cmd = ['diff', '--name-only', '--no-renames', '-z'] + args
+ return self.run_git_split(cmd, '\0')
+
+ def get_submodule_paths(self) -> list[str]:
+ """Return a list of submodule paths recursively."""
+ cmd = ['submodule', 'status', '--recursive']
+ output = self.run_git_split(cmd, '\n')
+ submodule_paths = [re.search(r'^.[0-9a-f]+ (?P<path>[^ ]+)', line).group('path') for line in output]
+
+ # status is returned for all submodules in the current git repository relative to the current directory
+ # when the current directory is not the root of the git repository this can yield relative paths which are not below the current directory
+ # this can occur when multiple collections are in a git repo and some collections are submodules when others are not
+ # specifying "." as the path to enumerate would limit results to the current directory, but can cause the git command to fail with the error:
+ # error: pathspec '.' did not match any file(s) known to git
+ # this can occur when the current directory contains no files tracked by git
+ # instead we'll filter out the relative paths, since we're only interested in those at or below the current directory
+ submodule_paths = [path for path in submodule_paths if not path.startswith('../')]
+
+ return submodule_paths
+
+ def get_file_names(self, args: list[str]) -> list[str]:
+ """Return a list of file names from the `git ls-files` command."""
+ cmd = ['ls-files', '-z'] + args
+ return self.run_git_split(cmd, '\0')
+
+ def get_branches(self) -> list[str]:
+ """Return the list of branches."""
+ cmd = ['for-each-ref', 'refs/heads/', '--format', '%(refname:strip=2)']
+ return self.run_git_split(cmd)
+
+ def get_branch(self) -> str:
+ """Return the current branch name."""
+ cmd = ['symbolic-ref', '--short', 'HEAD']
+ return self.run_git(cmd).strip()
+
+ def get_rev_list(self, commits: t.Optional[list[str]] = None, max_count: t.Optional[int] = None) -> list[str]:
+ """Return the list of results from the `git rev-list` command."""
+ cmd = ['rev-list']
+
+ if commits:
+ cmd += commits
+ else:
+ cmd += ['HEAD']
+
+ if max_count:
+ cmd += ['--max-count', '%s' % max_count]
+
+ return self.run_git_split(cmd)
+
+ def get_branch_fork_point(self, branch: str) -> str:
+ """Return a reference to the point at which the given branch was forked."""
+ cmd = ['merge-base', '--fork-point', branch]
+ return self.run_git(cmd).strip()
+
+ def is_valid_ref(self, ref: str) -> bool:
+ """Return True if the given reference is valid, otherwise return False."""
+ cmd = ['show', ref]
+ try:
+ self.run_git(cmd, str_errors='replace')
+ return True
+ except SubprocessError:
+ return False
+
+ def run_git_split(self, cmd: list[str], separator: t.Optional[str] = None, str_errors: str = 'strict') -> list[str]:
+ """Run the given `git` command and return the results as a list."""
+ output = self.run_git(cmd, str_errors=str_errors).strip(separator)
+
+ if not output:
+ return []
+
+ return output.split(separator)
+
+ def run_git(self, cmd: list[str], str_errors: str = 'strict') -> str:
+ """Run the given `git` command and return the results as a string."""
+ return raw_command([self.git] + cmd, cwd=self.root, capture=True, str_errors=str_errors)[0]
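Why get_diff_names() and get_file_names() pass -z and split on NUL: NUL-delimited output remains unambiguous even when file names contain spaces or newlines. The splitting below mirrors run_git_split() with canned output:

    output = 'path/one\x00path with spaces\x00'  # canned `git ls-files -z` style output

    names = output.strip('\x00').split('\x00')
    assert names == ['path/one', 'path with spaces']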
diff --git a/test/lib/ansible_test/_internal/host_configs.py b/test/lib/ansible_test/_internal/host_configs.py
new file mode 100644
index 0000000..48d5fd3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/host_configs.py
@@ -0,0 +1,523 @@
+"""Configuration for the test hosts requested by the user."""
+from __future__ import annotations
+
+import abc
+import dataclasses
+import enum
+import os
+import pickle
+import sys
+import typing as t
+
+from .constants import (
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from .io import (
+ open_binary_file,
+)
+
+from .completion import (
+ AuditMode,
+ CGroupVersion,
+ CompletionConfig,
+ docker_completion,
+ DockerCompletionConfig,
+ InventoryCompletionConfig,
+ network_completion,
+ NetworkRemoteCompletionConfig,
+ PosixCompletionConfig,
+ PosixRemoteCompletionConfig,
+ PosixSshCompletionConfig,
+ remote_completion,
+ RemoteCompletionConfig,
+ windows_completion,
+ WindowsRemoteCompletionConfig,
+ filter_completion,
+)
+
+from .util import (
+ find_python,
+ get_available_python_versions,
+ str_to_version,
+ version_to_str,
+ Architecture,
+)
+
+
+@dataclasses.dataclass(frozen=True)
+class OriginCompletionConfig(PosixCompletionConfig):
+ """Pseudo completion config for the origin."""
+ def __init__(self) -> None:
+ super().__init__(name='origin')
+
+ @property
+ def supported_pythons(self) -> list[str]:
+ """Return a list of the supported Python versions."""
+ current_version = version_to_str(sys.version_info[:2])
+ versions = [version for version in SUPPORTED_PYTHON_VERSIONS if version == current_version] + \
+ [version for version in SUPPORTED_PYTHON_VERSIONS if version != current_version]
+ return versions
+
+ def get_python_path(self, version: str) -> str:
+ """Return the path of the requested Python version."""
+        return find_python(version)
+
+ @property
+ def is_default(self) -> bool:
+ """True if the completion entry is only used for defaults, otherwise False."""
+ return False
+
+
+@dataclasses.dataclass(frozen=True)
+class HostContext:
+ """Context used when getting and applying defaults for host configurations."""
+ controller_config: t.Optional['PosixConfig']
+
+ @property
+ def controller(self) -> bool:
+ """True if the context is for the controller, otherwise False."""
+ return not self.controller_config
+
+
+@dataclasses.dataclass
+class HostConfig(metaclass=abc.ABCMeta):
+ """Base class for host configuration."""
+ @abc.abstractmethod
+ def get_defaults(self, context: HostContext) -> CompletionConfig:
+ """Return the default settings."""
+
+ @abc.abstractmethod
+ def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
+ """Apply default settings."""
+
+ @property
+ def is_managed(self) -> bool:
+ """
+ True if the host is a managed instance, otherwise False.
+ Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user.
+ """
+ return False
+
+
+@dataclasses.dataclass
+class PythonConfig(metaclass=abc.ABCMeta):
+ """Configuration for Python."""
+ version: t.Optional[str] = None
+ path: t.Optional[str] = None
+
+ @property
+ def tuple(self) -> tuple[int, ...]:
+ """Return the Python version as a tuple."""
+ return str_to_version(self.version)
+
+ @property
+ def major_version(self) -> int:
+ """Return the Python major version."""
+ return self.tuple[0]
+
+ def apply_defaults(self, context: HostContext, defaults: PosixCompletionConfig) -> None:
+ """Apply default settings."""
+ if self.version in (None, 'default'):
+ self.version = defaults.get_default_python(context.controller)
+
+ if self.path:
+ if self.path.endswith('/'):
+ self.path = os.path.join(self.path, f'python{self.version}')
+
+ # FUTURE: If the host is origin, the python path could be validated here.
+ else:
+ self.path = defaults.get_python_path(self.version)
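+
+        # Illustrative example (values hypothetical): version='3.11' with a trailing-slash
+        # path such as '/usr/bin/' resolves to '/usr/bin/python3.11'; with no path set,
+        # the completion defaults provide the interpreter path instead.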
+
+ @property
+ @abc.abstractmethod
+ def is_managed(self) -> bool:
+ """
+ True if this Python is a managed instance, otherwise False.
+ Managed instances are used exclusively by ansible-test and can safely have requirements installed without explicit permission from the user.
+ """
+
+
+@dataclasses.dataclass
+class NativePythonConfig(PythonConfig):
+ """Configuration for native Python."""
+ @property
+ def is_managed(self) -> bool:
+ """
+ True if this Python is a managed instance, otherwise False.
+ Managed instances are used exclusively by ansible-test and can safely have requirements installed without explicit permission from the user.
+ """
+ return False
+
+
+@dataclasses.dataclass
+class VirtualPythonConfig(PythonConfig):
+ """Configuration for Python in a virtual environment."""
+ system_site_packages: t.Optional[bool] = None
+
+ def apply_defaults(self, context: HostContext, defaults: PosixCompletionConfig) -> None:
+ """Apply default settings."""
+ super().apply_defaults(context, defaults)
+
+ if self.system_site_packages is None:
+ self.system_site_packages = False
+
+ @property
+ def is_managed(self) -> bool:
+ """
+ True if this Python is a managed instance, otherwise False.
+ Managed instances are used exclusively by ansible-test and can safely have requirements installed without explicit permission from the user.
+ """
+ return True
+
+
+@dataclasses.dataclass
+class PosixConfig(HostConfig, metaclass=abc.ABCMeta):
+ """Base class for POSIX host configuration."""
+ python: t.Optional[PythonConfig] = None
+
+ @property
+ @abc.abstractmethod
+ def have_root(self) -> bool:
+ """True if root is available, otherwise False."""
+
+ @abc.abstractmethod
+ def get_defaults(self, context: HostContext) -> PosixCompletionConfig:
+ """Return the default settings."""
+
+ def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
+ """Apply default settings."""
+ assert isinstance(defaults, PosixCompletionConfig)
+
+ super().apply_defaults(context, defaults)
+
+ self.python = self.python or NativePythonConfig()
+ self.python.apply_defaults(context, defaults)
+
+
+@dataclasses.dataclass
+class ControllerHostConfig(PosixConfig, metaclass=abc.ABCMeta):
+ """Base class for host configurations which support the controller."""
+ @abc.abstractmethod
+ def get_default_targets(self, context: HostContext) -> list[ControllerConfig]:
+ """Return the default targets for this host config."""
+
+
+@dataclasses.dataclass
+class RemoteConfig(HostConfig, metaclass=abc.ABCMeta):
+ """Base class for remote host configuration."""
+ name: t.Optional[str] = None
+ provider: t.Optional[str] = None
+ arch: t.Optional[str] = None
+
+ @property
+ def platform(self) -> str:
+ """The name of the platform."""
+ return self.name.partition('/')[0]
+
+ @property
+ def version(self) -> str:
+ """The version of the platform."""
+ return self.name.partition('/')[2]
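+
+    # For example (illustrative): name='rhel/9.0' yields platform='rhel' and version='9.0'.
+    # A name without '/' yields version='', since str.partition returns an empty remainder.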
+
+ def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
+ """Apply default settings."""
+ assert isinstance(defaults, RemoteCompletionConfig)
+
+ super().apply_defaults(context, defaults)
+
+ if self.provider == 'default':
+ self.provider = None
+
+ self.provider = self.provider or defaults.provider or 'aws'
+ self.arch = self.arch or defaults.arch or Architecture.X86_64
+
+ @property
+ def is_managed(self) -> bool:
+ """
+ True if this host is a managed instance, otherwise False.
+ Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user.
+ """
+ return True
+
+
+@dataclasses.dataclass
+class PosixSshConfig(PosixConfig):
+ """Configuration for a POSIX SSH host."""
+ user: t.Optional[str] = None
+ host: t.Optional[str] = None
+ port: t.Optional[int] = None
+
+ def get_defaults(self, context: HostContext) -> PosixSshCompletionConfig:
+ """Return the default settings."""
+ return PosixSshCompletionConfig(
+ user=self.user,
+ host=self.host,
+ )
+
+ @property
+ def have_root(self) -> bool:
+ """True if root is available, otherwise False."""
+ return self.user == 'root'
+
+
+@dataclasses.dataclass
+class InventoryConfig(HostConfig):
+ """Configuration using inventory."""
+ path: t.Optional[str] = None
+
+ def get_defaults(self, context: HostContext) -> InventoryCompletionConfig:
+ """Return the default settings."""
+ return InventoryCompletionConfig()
+
+ def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
+ """Apply default settings."""
+ assert isinstance(defaults, InventoryCompletionConfig)
+
+
+@dataclasses.dataclass
+class DockerConfig(ControllerHostConfig, PosixConfig):
+ """Configuration for a docker host."""
+ name: t.Optional[str] = None
+ image: t.Optional[str] = None
+ memory: t.Optional[int] = None
+ privileged: t.Optional[bool] = None
+ seccomp: t.Optional[str] = None
+ cgroup: t.Optional[CGroupVersion] = None
+ audit: t.Optional[AuditMode] = None
+
+ def get_defaults(self, context: HostContext) -> DockerCompletionConfig:
+ """Return the default settings."""
+ return filter_completion(docker_completion()).get(self.name) or DockerCompletionConfig(
+ name=self.name,
+ image=self.name,
+ placeholder=True,
+ )
+
+ def get_default_targets(self, context: HostContext) -> list[ControllerConfig]:
+ """Return the default targets for this host config."""
+ if self.name in filter_completion(docker_completion()):
+ defaults = self.get_defaults(context)
+ pythons = {version: defaults.get_python_path(version) for version in defaults.supported_pythons}
+ else:
+ pythons = {context.controller_config.python.version: context.controller_config.python.path}
+
+ return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in pythons.items()]
+
+ def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
+ """Apply default settings."""
+ assert isinstance(defaults, DockerCompletionConfig)
+
+ super().apply_defaults(context, defaults)
+
+ self.name = defaults.name
+ self.image = defaults.image
+
+ if self.seccomp is None:
+ self.seccomp = defaults.seccomp
+
+ if self.cgroup is None:
+ self.cgroup = defaults.cgroup_enum
+
+ if self.audit is None:
+ self.audit = defaults.audit_enum
+
+ if self.privileged is None:
+ self.privileged = False
+
+ @property
+ def is_managed(self) -> bool:
+ """
+ True if this host is a managed instance, otherwise False.
+ Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user.
+ """
+ return True
+
+ @property
+ def have_root(self) -> bool:
+ """True if root is available, otherwise False."""
+ return True
+
+
+@dataclasses.dataclass
+class PosixRemoteConfig(RemoteConfig, ControllerHostConfig, PosixConfig):
+ """Configuration for a POSIX remote host."""
+ become: t.Optional[str] = None
+
+ def get_defaults(self, context: HostContext) -> PosixRemoteCompletionConfig:
+ """Return the default settings."""
+ # pylint: disable=unexpected-keyword-arg # see: https://github.com/PyCQA/pylint/issues/7434
+ return filter_completion(remote_completion()).get(self.name) or remote_completion().get(self.platform) or PosixRemoteCompletionConfig(
+ name=self.name,
+ placeholder=True,
+ )
+
+ def get_default_targets(self, context: HostContext) -> list[ControllerConfig]:
+ """Return the default targets for this host config."""
+ if self.name in filter_completion(remote_completion()):
+ defaults = self.get_defaults(context)
+ pythons = {version: defaults.get_python_path(version) for version in defaults.supported_pythons}
+ else:
+ pythons = {context.controller_config.python.version: context.controller_config.python.path}
+
+ return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in pythons.items()]
+
+ def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
+ """Apply default settings."""
+ assert isinstance(defaults, PosixRemoteCompletionConfig)
+
+ super().apply_defaults(context, defaults)
+
+ self.become = self.become or defaults.become
+
+ @property
+ def have_root(self) -> bool:
+ """True if root is available, otherwise False."""
+ return True
+
+
+@dataclasses.dataclass
+class WindowsConfig(HostConfig, metaclass=abc.ABCMeta):
+ """Base class for Windows host configuration."""
+
+
+@dataclasses.dataclass
+class WindowsRemoteConfig(RemoteConfig, WindowsConfig):
+ """Configuration for a remote Windows host."""
+ def get_defaults(self, context: HostContext) -> WindowsRemoteCompletionConfig:
+ """Return the default settings."""
+ return filter_completion(windows_completion()).get(self.name) or windows_completion().get(self.platform)
+
+
+@dataclasses.dataclass
+class WindowsInventoryConfig(InventoryConfig, WindowsConfig):
+ """Configuration for Windows hosts using inventory."""
+
+
+@dataclasses.dataclass
+class NetworkConfig(HostConfig, metaclass=abc.ABCMeta):
+ """Base class for network host configuration."""
+
+
+@dataclasses.dataclass
+class NetworkRemoteConfig(RemoteConfig, NetworkConfig):
+ """Configuration for a remote network host."""
+ collection: t.Optional[str] = None
+ connection: t.Optional[str] = None
+
+ def get_defaults(self, context: HostContext) -> NetworkRemoteCompletionConfig:
+ """Return the default settings."""
+ return filter_completion(network_completion()).get(self.name) or NetworkRemoteCompletionConfig(
+ name=self.name,
+ placeholder=True,
+ )
+
+ def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
+ """Apply default settings."""
+ assert isinstance(defaults, NetworkRemoteCompletionConfig)
+
+ super().apply_defaults(context, defaults)
+
+ self.collection = self.collection or defaults.collection
+ self.connection = self.connection or defaults.connection
+
+
+@dataclasses.dataclass
+class NetworkInventoryConfig(InventoryConfig, NetworkConfig):
+ """Configuration for network hosts using inventory."""
+
+
+@dataclasses.dataclass
+class OriginConfig(ControllerHostConfig, PosixConfig):
+ """Configuration for the origin host."""
+ def get_defaults(self, context: HostContext) -> OriginCompletionConfig:
+ """Return the default settings."""
+ return OriginCompletionConfig()
+
+ def get_default_targets(self, context: HostContext) -> list[ControllerConfig]:
+ """Return the default targets for this host config."""
+ return [ControllerConfig(python=NativePythonConfig(version=version, path=path)) for version, path in get_available_python_versions().items()]
+
+ @property
+ def have_root(self) -> bool:
+ """True if root is available, otherwise False."""
+ return os.getuid() == 0
+
+
+@dataclasses.dataclass
+class ControllerConfig(PosixConfig):
+ """Configuration for the controller host."""
+ controller: t.Optional[PosixConfig] = None
+
+ def get_defaults(self, context: HostContext) -> PosixCompletionConfig:
+ """Return the default settings."""
+ return context.controller_config.get_defaults(context)
+
+ def apply_defaults(self, context: HostContext, defaults: CompletionConfig) -> None:
+ """Apply default settings."""
+ assert isinstance(defaults, PosixCompletionConfig)
+
+ self.controller = context.controller_config
+
+ if not self.python and not defaults.supported_pythons:
+ # The user did not specify a target Python and supported Pythons are unknown, so use the controller Python specified by the user instead.
+ self.python = context.controller_config.python
+
+ super().apply_defaults(context, defaults)
+
+ @property
+ def is_managed(self) -> bool:
+ """
+ True if the host is a managed instance, otherwise False.
+ Managed instances are used exclusively by ansible-test and can safely have destructive operations performed without explicit permission from the user.
+ """
+ return self.controller.is_managed
+
+ @property
+ def have_root(self) -> bool:
+ """True if root is available, otherwise False."""
+ return self.controller.have_root
+
+
+class FallbackReason(enum.Enum):
+ """Reason fallback was performed."""
+ ENVIRONMENT = enum.auto()
+ PYTHON = enum.auto()
+
+
+@dataclasses.dataclass(frozen=True)
+class FallbackDetail:
+ """Details about controller fallback behavior."""
+ reason: FallbackReason
+ message: str
+
+
+@dataclasses.dataclass(frozen=True)
+class HostSettings:
+ """Host settings for the controller and targets."""
+ controller: ControllerHostConfig
+ targets: list[HostConfig]
+ skipped_python_versions: list[str]
+ filtered_args: list[str]
+ controller_fallback: t.Optional[FallbackDetail]
+
+ def serialize(self, path: str) -> None:
+ """Serialize the host settings to the given path."""
+ with open_binary_file(path, 'wb') as settings_file:
+ pickle.dump(self, settings_file)
+
+ @staticmethod
+ def deserialize(path: str) -> HostSettings:
+ """Deserialize host settings from the path."""
+ with open_binary_file(path) as settings_file:
+ return pickle.load(settings_file)
+
+ def apply_defaults(self) -> None:
+ """Apply defaults to the host settings."""
+ context = HostContext(controller_config=None)
+ self.controller.apply_defaults(context, self.controller.get_defaults(context))
+
+ for target in self.targets:
+ context = HostContext(controller_config=self.controller)
+ target.apply_defaults(context, target.get_defaults(context))
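+
+# Example round trip (an illustrative sketch; the path and field values are hypothetical):
+#
+#   settings = HostSettings(controller=OriginConfig(), targets=[], skipped_python_versions=[],
+#                           filtered_args=[], controller_fallback=None)
+#   settings.serialize('/tmp/host_settings.dat')
+#   restored = HostSettings.deserialize('/tmp/host_settings.dat')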
diff --git a/test/lib/ansible_test/_internal/host_profiles.py b/test/lib/ansible_test/_internal/host_profiles.py
new file mode 100644
index 0000000..0abc996
--- /dev/null
+++ b/test/lib/ansible_test/_internal/host_profiles.py
@@ -0,0 +1,1428 @@
+"""Profiles to represent individual test hosts or a user-provided inventory file."""
+from __future__ import annotations
+
+import abc
+import dataclasses
+import os
+import shlex
+import tempfile
+import time
+import typing as t
+
+from .io import (
+ read_text_file,
+ write_text_file,
+)
+
+from .config import (
+ CommonConfig,
+ EnvironmentConfig,
+ IntegrationConfig,
+ TerminateMode,
+)
+
+from .host_configs import (
+ ControllerConfig,
+ ControllerHostConfig,
+ DockerConfig,
+ HostConfig,
+ NetworkInventoryConfig,
+ NetworkRemoteConfig,
+ OriginConfig,
+ PosixConfig,
+ PosixRemoteConfig,
+ PosixSshConfig,
+ PythonConfig,
+ RemoteConfig,
+ VirtualPythonConfig,
+ WindowsInventoryConfig,
+ WindowsRemoteConfig,
+)
+
+from .core_ci import (
+ AnsibleCoreCI,
+ SshKey,
+ VmResource,
+)
+
+from .util import (
+ ApplicationError,
+ SubprocessError,
+ cache,
+ display,
+ get_type_map,
+ sanitize_host_name,
+ sorted_versions,
+ InternalError,
+ HostConnectionError,
+ ANSIBLE_TEST_TARGET_ROOT,
+)
+
+from .util_common import (
+ get_docs_url,
+ intercept_python,
+)
+
+from .docker_util import (
+ docker_exec,
+ docker_image_inspect,
+ docker_logs,
+ docker_pull,
+ docker_rm,
+ get_docker_hostname,
+ require_docker,
+ get_docker_info,
+ detect_host_properties,
+ run_utility_container,
+ SystemdControlGroupV1Status,
+ LOGINUID_NOT_SET,
+ UTILITY_IMAGE,
+)
+
+from .bootstrap import (
+ BootstrapDocker,
+ BootstrapRemote,
+)
+
+from .venv import (
+ get_virtual_python,
+)
+
+from .ssh import (
+ SshConnectionDetail,
+)
+
+from .ansible_util import (
+ ansible_environment,
+ get_hosts,
+ parse_inventory,
+)
+
+from .containers import (
+ CleanupMode,
+ HostType,
+ get_container_database,
+ run_support_container,
+)
+
+from .connections import (
+ Connection,
+ DockerConnection,
+ LocalConnection,
+ SshConnection,
+)
+
+from .become import (
+ Become,
+ SUPPORTED_BECOME_METHODS,
+ Sudo,
+)
+
+from .completion import (
+ AuditMode,
+ CGroupVersion,
+)
+
+from .dev.container_probe import (
+ CGroupMount,
+ CGroupPath,
+ CGroupState,
+ MountType,
+ check_container_cgroup_status,
+)
+
+TControllerHostConfig = t.TypeVar('TControllerHostConfig', bound=ControllerHostConfig)
+THostConfig = t.TypeVar('THostConfig', bound=HostConfig)
+TPosixConfig = t.TypeVar('TPosixConfig', bound=PosixConfig)
+TRemoteConfig = t.TypeVar('TRemoteConfig', bound=RemoteConfig)
+
+
+class ControlGroupError(ApplicationError):
+ """Raised when the container host does not have the necessary cgroup support to run a container."""
+ def __init__(self, args: CommonConfig, reason: str) -> None:
+ engine = require_docker().command
+ dd_wsl2 = get_docker_info(args).docker_desktop_wsl2
+
+ message = f'''
+{reason}
+
+Run the following commands as root on the container host to resolve this issue:
+
+ mkdir /sys/fs/cgroup/systemd
+ mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr
+ chown -R {{user}}:{{group}} /sys/fs/cgroup/systemd # only when rootless
+
+NOTE: These changes must be applied each time the container host is rebooted.
+'''.strip()
+
+ podman_message = '''
+ If rootless Podman is already running [1], you may need to stop it before
+ containers are able to use the new mount point.
+
+[1] Check for 'podman' and 'catatonit' processes.
+'''
+
+ dd_wsl_message = f'''
+ When using Docker Desktop with WSL2, additional configuration [1] is required.
+
+[1] {get_docs_url("https://docs.ansible.com/ansible-core/devel/dev_guide/testing_running_locally.html#docker-desktop-with-wsl2")}
+'''
+
+ if engine == 'podman':
+ message += podman_message
+ elif dd_wsl2:
+ message += dd_wsl_message
+
+ message = message.strip()
+
+ super().__init__(message)
+
+
+@dataclasses.dataclass(frozen=True)
+class Inventory:
+ """Simple representation of an Ansible inventory."""
+ host_groups: dict[str, dict[str, dict[str, t.Union[str, int]]]]
+ extra_groups: t.Optional[dict[str, list[str]]] = None
+
+ @staticmethod
+ def create_single_host(name: str, variables: dict[str, t.Union[str, int]]) -> Inventory:
+ """Return an inventory instance created from the given hostname and variables."""
+ return Inventory(host_groups=dict(all={name: variables}))
+
+ def write(self, args: CommonConfig, path: str) -> None:
+ """Write the given inventory to the specified path on disk."""
+
+ # NOTE: Switching the inventory generation to write JSON would be nice, but is currently not possible due to the use of hard-coded inventory filenames.
+ # The name `inventory` works for the POSIX integration tests, but `inventory.winrm` and `inventory.networking` will only parse in INI format.
+ # If tests are updated to use the `INVENTORY_PATH` environment variable, then this could be changed.
+ # Also, some tests detect the test type by inspecting the suffix on the inventory filename, which would break if it were changed.
+
+ inventory_text = ''
+
+ for group, hosts in self.host_groups.items():
+ inventory_text += f'[{group}]\n'
+
+ for host, variables in hosts.items():
+ kvp = ' '.join(f'{key}="{value}"' for key, value in variables.items())
+ inventory_text += f'{host} {kvp}\n'
+
+ inventory_text += '\n'
+
+ for group, children in (self.extra_groups or {}).items():
+ inventory_text += f'[{group}]\n'
+
+ for child in children:
+ inventory_text += f'{child}\n'
+
+ inventory_text += '\n'
+
+ inventory_text = inventory_text.strip()
+
+ if not args.explain:
+ write_text_file(path, inventory_text + '\n')
+
+ display.info(f'>>> Inventory\n{inventory_text}', verbosity=3)
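+
+        # Illustrative output for create_single_host('target', dict(ansible_host='10.0.0.5')):
+        #
+        #   [all]
+        #   target ansible_host="10.0.0.5"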
+
+
+class HostProfile(t.Generic[THostConfig], metaclass=abc.ABCMeta):
+ """Base class for host profiles."""
+ def __init__(self,
+ *,
+ args: EnvironmentConfig,
+ config: THostConfig,
+ targets: t.Optional[list[HostConfig]],
+ ) -> None:
+ self.args = args
+ self.config = config
+ self.controller = bool(targets)
+ self.targets = targets or []
+
+ self.state: dict[str, t.Any] = {}
+ """State that must be persisted across delegation."""
+ self.cache: dict[str, t.Any] = {}
+ """Cache that must not be persisted across delegation."""
+
+ def provision(self) -> None:
+ """Provision the host before delegation."""
+
+ def setup(self) -> None:
+ """Perform out-of-band setup before delegation."""
+
+ def on_target_failure(self) -> None:
+ """Executed during failure handling if this profile is a target."""
+
+ def deprovision(self) -> None:
+ """Deprovision the host after delegation has completed."""
+
+ def wait(self) -> None:
+ """Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
+
+ def configure(self) -> None:
+ """Perform in-band configuration. Executed before delegation for the controller and after delegation for targets."""
+
+ def __getstate__(self):
+ return {key: value for key, value in self.__dict__.items() if key not in ('args', 'cache')}
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+
+ # args will be populated after the instances are restored
+ self.cache = {}
+
+
+class PosixProfile(HostProfile[TPosixConfig], metaclass=abc.ABCMeta):
+ """Base class for POSIX host profiles."""
+ @property
+ def python(self) -> PythonConfig:
+ """
+ The Python to use for this profile.
+ If it is a virtual python, it will be created the first time it is requested.
+ """
+ python = self.state.get('python')
+
+ if not python:
+ python = self.config.python
+
+ if isinstance(python, VirtualPythonConfig):
+ python = get_virtual_python(self.args, python)
+
+ self.state['python'] = python
+
+ return python
+
+
+class ControllerHostProfile(PosixProfile[TControllerHostConfig], metaclass=abc.ABCMeta):
+ """Base class for profiles usable as a controller."""
+ @abc.abstractmethod
+ def get_origin_controller_connection(self) -> Connection:
+ """Return a connection for accessing the host as a controller from the origin."""
+
+ @abc.abstractmethod
+ def get_working_directory(self) -> str:
+ """Return the working directory for the host."""
+
+
+class SshTargetHostProfile(HostProfile[THostConfig], metaclass=abc.ABCMeta):
+ """Base class for profiles offering SSH connectivity."""
+ @abc.abstractmethod
+ def get_controller_target_connections(self) -> list[SshConnection]:
+ """Return SSH connection(s) for accessing the host as a target from the controller."""
+
+
+class RemoteProfile(SshTargetHostProfile[TRemoteConfig], metaclass=abc.ABCMeta):
+ """Base class for remote instance profiles."""
+ @property
+ def core_ci_state(self) -> t.Optional[dict[str, str]]:
+ """The saved Ansible Core CI state."""
+ return self.state.get('core_ci')
+
+ @core_ci_state.setter
+ def core_ci_state(self, value: dict[str, str]) -> None:
+ """The saved Ansible Core CI state."""
+ self.state['core_ci'] = value
+
+ def provision(self) -> None:
+ """Provision the host before delegation."""
+ self.core_ci = self.create_core_ci(load=True)
+ self.core_ci.start()
+
+ self.core_ci_state = self.core_ci.save()
+
+ def deprovision(self) -> None:
+ """Deprovision the host after delegation has completed."""
+ if self.args.remote_terminate == TerminateMode.ALWAYS or (self.args.remote_terminate == TerminateMode.SUCCESS and self.args.success):
+ self.delete_instance()
+
+ @property
+ def core_ci(self) -> t.Optional[AnsibleCoreCI]:
+ """Return the cached AnsibleCoreCI instance, if any, otherwise None."""
+ return self.cache.get('core_ci')
+
+ @core_ci.setter
+ def core_ci(self, value: AnsibleCoreCI) -> None:
+ """Cache the given AnsibleCoreCI instance."""
+ self.cache['core_ci'] = value
+
+ def get_instance(self) -> t.Optional[AnsibleCoreCI]:
+ """Return the current AnsibleCoreCI instance, loading it if not already loaded."""
+ if not self.core_ci and self.core_ci_state:
+ self.core_ci = self.create_core_ci(load=False)
+ self.core_ci.load(self.core_ci_state)
+
+ return self.core_ci
+
+ def delete_instance(self) -> None:
+ """Delete the AnsibleCoreCI VM instance."""
+ core_ci = self.get_instance()
+
+ if not core_ci:
+ return # instance has not been provisioned
+
+ core_ci.stop()
+
+ def wait_for_instance(self) -> AnsibleCoreCI:
+ """Wait for an AnsibleCoreCI VM instance to become ready."""
+ core_ci = self.get_instance()
+ core_ci.wait()
+
+ return core_ci
+
+ def create_core_ci(self, load: bool) -> AnsibleCoreCI:
+ """Create and return an AnsibleCoreCI instance."""
+ if not self.config.arch:
+ raise InternalError(f'No arch specified for config: {self.config}')
+
+ return AnsibleCoreCI(
+ args=self.args,
+ resource=VmResource(
+ platform=self.config.platform,
+ version=self.config.version,
+ architecture=self.config.arch,
+ provider=self.config.provider,
+ tag='controller' if self.controller else 'target',
+ ),
+ load=load,
+ )
+
+
+class ControllerProfile(SshTargetHostProfile[ControllerConfig], PosixProfile[ControllerConfig]):
+ """Host profile for the controller as a target."""
+ def get_controller_target_connections(self) -> list[SshConnection]:
+ """Return SSH connection(s) for accessing the host as a target from the controller."""
+ settings = SshConnectionDetail(
+ name='localhost',
+ host='localhost',
+ port=None,
+ user='root',
+ identity_file=SshKey(self.args).key,
+ python_interpreter=self.args.controller_python.path,
+ )
+
+ return [SshConnection(self.args, settings)]
+
+
+class DockerProfile(ControllerHostProfile[DockerConfig], SshTargetHostProfile[DockerConfig]):
+ """Host profile for a docker instance."""
+
+ MARKER = 'ansible-test-marker'
+
+ @dataclasses.dataclass(frozen=True)
+ class InitConfig:
+ """Configuration details required to run the container init."""
+ options: list[str]
+ command: str
+ command_privileged: bool
+ expected_mounts: tuple[CGroupMount, ...]
+
+ @property
+ def container_name(self) -> t.Optional[str]:
+ """Return the stored container name, if any, otherwise None."""
+ return self.state.get('container_name')
+
+ @container_name.setter
+ def container_name(self, value: str) -> None:
+ """Store the given container name."""
+ self.state['container_name'] = value
+
+ @property
+ def cgroup_path(self) -> t.Optional[str]:
+ """Return the path to the cgroup v1 systemd hierarchy, if any, otherwise None."""
+ return self.state.get('cgroup_path')
+
+ @cgroup_path.setter
+ def cgroup_path(self, value: str) -> None:
+ """Store the path to the cgroup v1 systemd hierarchy."""
+ self.state['cgroup_path'] = value
+
+ @property
+ def label(self) -> str:
+ """Label to apply to resources related to this profile."""
+ return f'{"controller" if self.controller else "target"}-{self.args.session_name}'
+
+ def provision(self) -> None:
+ """Provision the host before delegation."""
+ init_probe = self.args.dev_probe_cgroups is not None
+ init_config = self.get_init_config()
+
+ container = run_support_container(
+ args=self.args,
+ context='__test_hosts__',
+ image=self.config.image,
+ name=f'ansible-test-{self.label}',
+ ports=[22],
+ publish_ports=not self.controller, # connections to the controller over SSH are not required
+ options=init_config.options,
+ cleanup=CleanupMode.NO,
+ cmd=self.build_init_command(init_config, init_probe),
+ )
+
+ if not container:
+ if self.args.prime_containers:
+ if init_config.command_privileged or init_probe:
+ docker_pull(self.args, UTILITY_IMAGE)
+
+ return
+
+ self.container_name = container.name
+
+ try:
+ options = ['--pid', 'host', '--privileged']
+
+ if init_config.command and init_config.command_privileged:
+ init_command = init_config.command
+
+ if not init_probe:
+ init_command += f' && {shlex.join(self.wake_command)}'
+
+ cmd = ['nsenter', '-t', str(container.details.container.pid), '-m', '-p', 'sh', '-c', init_command]
+ run_utility_container(self.args, f'ansible-test-init-{self.label}', cmd, options)
+
+ if init_probe:
+ check_container_cgroup_status(self.args, self.config, self.container_name, init_config.expected_mounts)
+
+ cmd = ['nsenter', '-t', str(container.details.container.pid), '-m', '-p'] + self.wake_command
+ run_utility_container(self.args, f'ansible-test-wake-{self.label}', cmd, options)
+ except SubprocessError:
+ display.info(f'Checking container "{self.container_name}" logs...')
+ docker_logs(self.args, self.container_name)
+
+ raise
+
+ def get_init_config(self) -> InitConfig:
+ """Return init config for running under the current container engine."""
+ self.check_cgroup_requirements()
+
+ engine = require_docker().command
+ init_config = getattr(self, f'get_{engine}_init_config')()
+
+ return init_config
+
+ def get_podman_init_config(self) -> InitConfig:
+ """Return init config for running under Podman."""
+ options = self.get_common_run_options()
+ command: t.Optional[str] = None
+ command_privileged = False
+ expected_mounts: tuple[CGroupMount, ...]
+
+ cgroup_version = get_docker_info(self.args).cgroup_version
+
+ # Podman 4.4.0 updated containers/common to 0.51.0, which removed the SYS_CHROOT capability from the default list.
+ # This capability is needed by services such as sshd, so is unconditionally added here.
+ # See: https://github.com/containers/podman/releases/tag/v4.4.0
+ # See: https://github.com/containers/common/releases/tag/v0.51.0
+ # See: https://github.com/containers/common/pull/1240
+ options.extend(('--cap-add', 'SYS_CHROOT'))
+
+ # Without AUDIT_WRITE the following errors may appear in the system logs of a container after attempting to log in using SSH:
+ #
+ # fatal: linux_audit_write_entry failed: Operation not permitted
+ #
+ # This occurs when running containers as root when the container host provides audit support, but the user lacks the AUDIT_WRITE capability.
+ # The AUDIT_WRITE capability is provided by docker by default, but not podman.
+ # See: https://github.com/moby/moby/pull/7179
+ #
+ # OpenSSH Portable requires AUDIT_WRITE when logging in with a TTY if the Linux audit feature was compiled in.
+ # Containers with the feature enabled will require the AUDIT_WRITE capability when EPERM is returned while accessing the audit system.
+ # See: https://github.com/openssh/openssh-portable/blob/2dc328023f60212cd29504fc05d849133ae47355/audit-linux.c#L90
+ # See: https://github.com/openssh/openssh-portable/blob/715c892f0a5295b391ae92c26ef4d6a86ea96e8e/loginrec.c#L476-L478
+ #
+ # Some containers will be running a patched version of OpenSSH which blocks logins when EPERM is received while using the audit system.
+ # These containers will require the AUDIT_WRITE capability when EPERM is returned while accessing the audit system.
+ # See: https://src.fedoraproject.org/rpms/openssh/blob/f36/f/openssh-7.6p1-audit.patch
+ #
+ # Since only some containers carry the patch or enable the Linux audit feature in OpenSSH, this capability is enabled on a per-container basis.
+ # No warning is provided when adding this capability, since there's not really anything the user can do about it.
+ if self.config.audit == AuditMode.REQUIRED and detect_host_properties(self.args).audit_code == 'EPERM':
+ options.extend(('--cap-add', 'AUDIT_WRITE'))
+
+ # Without AUDIT_CONTROL the following errors may appear in the system logs of a container after attempting to log in using SSH:
+ #
+ # pam_loginuid(sshd:session): Error writing /proc/self/loginuid: Operation not permitted
+ # pam_loginuid(sshd:session): set_loginuid failed
+ #
+ # Containers configured to use the pam_loginuid module will encounter this error. If the module is required, logins will fail.
+ # Since most containers will have this configuration, the code to handle this issue is applied to all containers.
+ #
+ # This occurs when the loginuid is set on the container host and doesn't match the user on the container host which is running the container.
+ # Container hosts which do not use systemd are likely to leave the loginuid unset and thus be unaffected.
+ # The most common source of a mismatch is the use of sudo to run ansible-test, which changes the uid but cannot change the loginuid.
+ # This condition typically occurs only under podman, since the loginuid is inherited from the current user.
+ # See: https://github.com/containers/podman/issues/13012#issuecomment-1034049725
+ #
+ # This condition is detected by querying the loginuid of a container running on the container host.
+ # When it occurs, a warning is displayed and the AUDIT_CONTROL capability is added to containers to work around the issue.
+ # The warning serves as notice to the user that their usage of ansible-test is responsible for the additional capability requirement.
+ if (loginuid := detect_host_properties(self.args).loginuid) not in (0, LOGINUID_NOT_SET, None):
+ display.warning(f'Running containers with capability AUDIT_CONTROL since the container loginuid ({loginuid}) is incorrect. '
+ 'This is most likely due to use of sudo to run ansible-test when loginuid is already set.', unique=True)
+
+ options.extend(('--cap-add', 'AUDIT_CONTROL'))
+
+ if self.config.cgroup == CGroupVersion.NONE:
+ # Containers which do not require cgroup do not use systemd.
+
+ options.extend((
+ # Disabling systemd support in Podman will allow these containers to work on hosts without systemd.
+ # Without this, running a container on a host without systemd results in errors such as (from crun):
+ # Error: crun: error stat'ing file `/sys/fs/cgroup/systemd`: No such file or directory:
+ # A similar error occurs when using runc:
+ # OCI runtime attempted to invoke a command that was not found
+ '--systemd', 'false',
+ # A private cgroup namespace limits what is visible in /proc/*/cgroup.
+ '--cgroupns', 'private',
+ # Mounting a tmpfs overrides the cgroup mount(s) that would otherwise be provided by Podman.
+ # This helps provide a consistent container environment across various container host configurations.
+ '--tmpfs', '/sys/fs/cgroup',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
+ )
+ elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V1_ONLY) and cgroup_version == 1:
+ # Podman hosts providing cgroup v1 will automatically bind mount the systemd hierarchy read-write in the container.
+ # They will also create a dedicated cgroup v1 systemd hierarchy for the container.
+ # On hosts with systemd this path is: /sys/fs/cgroup/systemd/libpod_parent/libpod-{container_id}/
+ # On hosts without systemd this path is: /sys/fs/cgroup/systemd/{container_id}/
+
+ options.extend((
+ # Force Podman to enable systemd support since a command may be used later (to support pre-init diagnostics).
+ '--systemd', 'always',
+ # The host namespace must be used to permit the container to access the cgroup v1 systemd hierarchy created by Podman.
+ '--cgroupns', 'host',
+ # Mask the host cgroup tmpfs mount to avoid exposing the host cgroup v1 hierarchies (or cgroup v2 hybrid) to the container.
+                # Podman will provide a cgroup v1 systemd hierarchy on top of this.
+ '--tmpfs', '/sys/fs/cgroup',
+ ))
+
+ self.check_systemd_cgroup_v1(options) # podman
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
+ # The mount point can be writable or not.
+ # The reason for the variation is not known.
+ CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=None, state=CGroupState.HOST),
+ # The filesystem type can be tmpfs or devtmpfs.
+ # The reason for the variation is not known.
+ CGroupMount(path=CGroupPath.SYSTEMD_RELEASE_AGENT, type=None, writable=False, state=None),
+ )
+ elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V2_ONLY) and cgroup_version == 2:
+ # Podman hosts providing cgroup v2 will give each container a read-write cgroup mount.
+
+ options.extend((
+ # Force Podman to enable systemd support since a command may be used later (to support pre-init diagnostics).
+ '--systemd', 'always',
+ # A private cgroup namespace is used to avoid exposing the host cgroup to the container.
+ '--cgroupns', 'private',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),
+ )
+ elif self.config.cgroup == CGroupVersion.V1_ONLY and cgroup_version == 2:
+ # Containers which require cgroup v1 need explicit volume mounts on container hosts not providing that version.
+ # We must put the container PID 1 into the cgroup v1 systemd hierarchy we create.
+ cgroup_path = self.create_systemd_cgroup_v1() # podman
+ command = f'echo 1 > {cgroup_path}/cgroup.procs'
+
+ options.extend((
+ # Force Podman to enable systemd support since a command is being provided.
+ '--systemd', 'always',
+ # A private cgroup namespace is required. Using the host cgroup namespace results in errors such as the following (from crun):
+ # Error: OCI runtime error: mount `/sys/fs/cgroup` to '/sys/fs/cgroup': Invalid argument
+ # A similar error occurs when using runc:
+ # Error: OCI runtime error: runc create failed: unable to start container process: error during container init:
+ # error mounting "/sys/fs/cgroup" to rootfs at "/sys/fs/cgroup": mount /sys/fs/cgroup:/sys/fs/cgroup (via /proc/self/fd/7), flags: 0x1000:
+ # invalid argument
+ '--cgroupns', 'private',
+ # Unlike Docker, Podman ignores a /sys/fs/cgroup tmpfs mount, instead exposing a cgroup v2 mount.
+ # The exposed volume will be read-write, but the container will have its own private namespace.
+ # Provide a read-only cgroup v1 systemd hierarchy under which the dedicated ansible-test cgroup will be mounted read-write.
+ # Without this systemd will fail while attempting to mount the cgroup v1 systemd hierarchy.
+ # Podman doesn't support using a tmpfs for this. Attempting to do so results in an error (from crun):
+ # Error: OCI runtime error: read: Invalid argument
+ # A similar error occurs when using runc:
+ # Error: OCI runtime error: runc create failed: unable to start container process: error during container init:
+ # error mounting "tmpfs" to rootfs at "/sys/fs/cgroup/systemd": tmpcopyup: failed to copy /sys/fs/cgroup/systemd to /proc/self/fd/7
+ # (/tmp/runctop3876247619/runctmpdir1460907418): read /proc/self/fd/7/cgroup.kill: invalid argument
+ '--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:ro',
+ # Provide the container access to the cgroup v1 systemd hierarchy created by ansible-test.
+ '--volume', f'{cgroup_path}:{cgroup_path}:rw',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),
+ CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=False, state=CGroupState.SHADOWED),
+ CGroupMount(path=cgroup_path, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST),
+ )
+ else:
+ raise InternalError(f'Unhandled cgroup configuration: {self.config.cgroup} on cgroup v{cgroup_version}.')
+
+ return self.InitConfig(
+ options=options,
+ command=command,
+ command_privileged=command_privileged,
+ expected_mounts=expected_mounts,
+ )
+
+ def get_docker_init_config(self) -> InitConfig:
+ """Return init config for running under Docker."""
+ options = self.get_common_run_options()
+ command: t.Optional[str] = None
+ command_privileged = False
+ expected_mounts: tuple[CGroupMount, ...]
+
+ cgroup_version = get_docker_info(self.args).cgroup_version
+
+ if self.config.cgroup == CGroupVersion.NONE:
+ # Containers which do not require cgroup do not use systemd.
+
+ if get_docker_info(self.args).cgroupns_option_supported:
+ # Use the `--cgroupns` option if it is supported.
+                # Older servers which do not support the option use the host cgroup namespace.
+ # Older clients which do not support the option cause newer servers to use the host cgroup namespace (cgroup v1 only).
+ # See: https://github.com/moby/moby/blob/master/api/server/router/container/container_routes.go#L512-L517
+ # If the host cgroup namespace is used, cgroup information will be visible, but the cgroup mounts will be unavailable due to the tmpfs below.
+ options.extend((
+ # A private cgroup namespace limits what is visible in /proc/*/cgroup.
+ '--cgroupns', 'private',
+ ))
+
+ options.extend((
+ # Mounting a tmpfs overrides the cgroup mount(s) that would otherwise be provided by Docker.
+ # This helps provide a consistent container environment across various container host configurations.
+ '--tmpfs', '/sys/fs/cgroup',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
+ )
+ elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V1_ONLY) and cgroup_version == 1:
+ # Docker hosts providing cgroup v1 will automatically bind mount the systemd hierarchy read-only in the container.
+ # They will also create a dedicated cgroup v1 systemd hierarchy for the container.
+            # The cgroup v1 systemd hierarchy path is: /sys/fs/cgroup/systemd/{container_id}/
+
+ if get_docker_info(self.args).cgroupns_option_supported:
+ # Use the `--cgroupns` option if it is supported.
+                # Older servers which do not support the option use the host cgroup namespace.
+ # Older clients which do not support the option cause newer servers to use the host cgroup namespace (cgroup v1 only).
+ # See: https://github.com/moby/moby/blob/master/api/server/router/container/container_routes.go#L512-L517
+ options.extend((
+ # The host cgroup namespace must be used.
+ # Otherwise, /proc/1/cgroup will report "/" for the cgroup path, which is incorrect.
+ # See: https://github.com/systemd/systemd/issues/19245#issuecomment-815954506
+ # It is set here to avoid relying on the current Docker configuration.
+ '--cgroupns', 'host',
+ ))
+
+ options.extend((
+ # Mask the host cgroup tmpfs mount to avoid exposing the host cgroup v1 hierarchies (or cgroup v2 hybrid) to the container.
+ '--tmpfs', '/sys/fs/cgroup',
+ # A cgroup v1 systemd hierarchy needs to be mounted read-write over the read-only one provided by Docker.
+ # Alternatives were tested, but were unusable due to various issues:
+ # - Attempting to remount the existing mount point read-write will result in a "mount point is busy" error.
+ # - Adding the entire "/sys/fs/cgroup" mount will expose hierarchies other than systemd.
+ # If the host is a cgroup v2 hybrid host it would also expose the /sys/fs/cgroup/unified/ hierarchy read-write.
+ # On older systems, such as an Ubuntu 18.04 host, a dedicated v2 cgroup would not be used, exposing the host cgroups to the container.
+ '--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw',
+ ))
+
+ self.check_systemd_cgroup_v1(options) # docker
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
+ CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST),
+ )
+ elif self.config.cgroup in (CGroupVersion.V1_V2, CGroupVersion.V2_ONLY) and cgroup_version == 2:
+ # Docker hosts providing cgroup v2 will give each container a read-only cgroup mount.
+ # It must be remounted read-write before systemd starts.
+ # This must be done in a privileged container, otherwise a "permission denied" error can occur.
+ command = 'mount -o remount,rw /sys/fs/cgroup/'
+ command_privileged = True
+
+ options.extend((
+ # A private cgroup namespace is used to avoid exposing the host cgroup to the container.
+ # This matches the behavior in Podman 1.7.0 and later, which select cgroupns 'host' mode for cgroup v1 and 'private' mode for cgroup v2.
+ # See: https://github.com/containers/podman/pull/4374
+ # See: https://github.com/containers/podman/blob/main/RELEASE_NOTES.md#170
+ '--cgroupns', 'private',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.CGROUP_V2, writable=True, state=CGroupState.PRIVATE),
+ )
+ elif self.config.cgroup == CGroupVersion.V1_ONLY and cgroup_version == 2:
+ # Containers which require cgroup v1 need explicit volume mounts on container hosts not providing that version.
+ # We must put the container PID 1 into the cgroup v1 systemd hierarchy we create.
+ cgroup_path = self.create_systemd_cgroup_v1() # docker
+ command = f'echo 1 > {cgroup_path}/cgroup.procs'
+
+ options.extend((
+ # A private cgroup namespace is used since no access to the host cgroup namespace is required.
+ # This matches the configuration used for running cgroup v1 containers under Podman.
+ '--cgroupns', 'private',
+ # Provide a read-write tmpfs filesystem to support additional cgroup mount points.
+ # Without this Docker will provide a read-only cgroup2 mount instead.
+ '--tmpfs', '/sys/fs/cgroup',
+ # Provide a read-write tmpfs filesystem to simulate a systemd cgroup v1 hierarchy.
+ # Without this systemd will fail while attempting to mount the cgroup v1 systemd hierarchy.
+ '--tmpfs', '/sys/fs/cgroup/systemd',
+ # Provide the container access to the cgroup v1 systemd hierarchy created by ansible-test.
+ '--volume', f'{cgroup_path}:{cgroup_path}:rw',
+ ))
+
+ expected_mounts = (
+ CGroupMount(path=CGroupPath.ROOT, type=MountType.TMPFS, writable=True, state=None),
+ CGroupMount(path=CGroupPath.SYSTEMD, type=MountType.TMPFS, writable=True, state=None),
+ CGroupMount(path=cgroup_path, type=MountType.CGROUP_V1, writable=True, state=CGroupState.HOST),
+ )
+ else:
+ raise InternalError(f'Unhandled cgroup configuration: {self.config.cgroup} on cgroup v{cgroup_version}.')
+
+ return self.InitConfig(
+ options=options,
+ command=command,
+ command_privileged=command_privileged,
+ expected_mounts=expected_mounts,
+ )
+
+ def build_init_command(self, init_config: InitConfig, sleep: bool) -> t.Optional[list[str]]:
+ """
+ Build and return the command to start in the container.
+ Returns None if the default command for the container should be used.
+
+ The sleep duration below was selected to:
+
+ - Allow enough time to perform necessary operations in the container before waking it.
+ - Make the delay obvious if the wake command doesn't run or succeed.
+ - Avoid hanging indefinitely or for an unreasonably long time.
+
+ NOTE: The container must have a POSIX-compliant default shell "sh" with a non-builtin "sleep" command.
+ """
+ command = ''
+
+ if init_config.command and not init_config.command_privileged:
+ command += f'{init_config.command} && '
+
+ if sleep or init_config.command_privileged:
+ command += 'sleep 60 ; '
+
+ if not command:
+ return None
+
+ docker_pull(self.args, self.config.image)
+ inspect = docker_image_inspect(self.args, self.config.image)
+
+ command += f'exec {shlex.join(inspect.cmd)}'
+
+ return ['sh', '-c', command]
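+
+    # Illustrative result (the image CMD ['/sbin/init'] is an assumption): a non-privileged
+    # init command with sleep=True yields:
+    #   ['sh', '-c', 'echo 1 > /sys/fs/cgroup/.../cgroup.procs && sleep 60 ; exec /sbin/init']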
+
+ @property
+ def wake_command(self) -> list[str]:
+ """
+ The command used to wake the container from sleep.
+ This will be run inside our utility container, so the command used does not need to be present in the container being woken up.
+ """
+ return ['pkill', 'sleep']
+
+ def check_systemd_cgroup_v1(self, options: list[str]) -> None:
+ """Check the cgroup v1 systemd hierarchy to verify it is writeable for our container."""
+ probe_script = (read_text_file(os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'check_systemd_cgroup_v1.sh'))
+ .replace('@MARKER@', self.MARKER)
+ .replace('@LABEL@', self.label))
+
+ cmd = ['sh']
+
+ try:
+ run_utility_container(self.args, f'ansible-test-cgroup-check-{self.label}', cmd, options, data=probe_script)
+ except SubprocessError as ex:
+ if error := self.extract_error(ex.stderr):
+ raise ControlGroupError(self.args, 'Unable to create a v1 cgroup within the systemd hierarchy.\n'
+ f'Reason: {error}') from ex # cgroup probe failed
+
+ raise
+
+ def create_systemd_cgroup_v1(self) -> str:
+ """Create a unique ansible-test cgroup in the v1 systemd hierarchy and return its path."""
+ self.cgroup_path = f'/sys/fs/cgroup/systemd/ansible-test-{self.label}'
+
+ # Privileged mode is required to create the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0.
+ # The mkdir command will fail with "Permission denied" otherwise.
+ options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged']
+ cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && mkdir {shlex.quote(self.cgroup_path)}']
+
+ try:
+ run_utility_container(self.args, f'ansible-test-cgroup-create-{self.label}', cmd, options)
+ except SubprocessError as ex:
+ if error := self.extract_error(ex.stderr):
+                raise ControlGroupError(self.args, 'Unable to create a v1 cgroup within the systemd hierarchy.\n'
+ f'Reason: {error}') from ex # cgroup create permission denied
+
+ raise
+
+ return self.cgroup_path
+
+ @property
+ def delete_systemd_cgroup_v1_command(self) -> list[str]:
+ """The command used to remove the previously created ansible-test cgroup in the v1 systemd hierarchy."""
+ return ['find', self.cgroup_path, '-type', 'd', '-delete']
+
+ def delete_systemd_cgroup_v1(self) -> None:
+ """Delete a previously created ansible-test cgroup in the v1 systemd hierarchy."""
+ # Privileged mode is required to remove the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0.
+ # The BusyBox find utility will report "Permission denied" otherwise, although it still exits with a status code of 0.
+ options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged']
+ cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && {shlex.join(self.delete_systemd_cgroup_v1_command)}']
+
+ try:
+ run_utility_container(self.args, f'ansible-test-cgroup-delete-{self.label}', cmd, options)
+ except SubprocessError as ex:
+ if error := self.extract_error(ex.stderr):
+ if error.endswith(': No such file or directory'):
+ return
+
+ display.error(str(ex))
+
+ def extract_error(self, value: str) -> t.Optional[str]:
+ """
+ Extract the ansible-test portion of the error message from the given value and return it.
+ Returns None if no ansible-test marker was found.
+ """
+ lines = value.strip().splitlines()
+
+ try:
+ idx = lines.index(self.MARKER)
+ except ValueError:
+ return None
+
+ lines = lines[idx + 1:]
+ message = '\n'.join(lines)
+
+ return message
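+
+    # Illustrative example: given stderr 'noise\nansible-test-marker\nmkdir: Permission denied',
+    # this returns 'mkdir: Permission denied'; without the marker line it returns None.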
+
+ def check_cgroup_requirements(self) -> None:
+ """Check cgroup requirements for the container."""
+ cgroup_version = get_docker_info(self.args).cgroup_version
+
+ if cgroup_version not in (1, 2):
+            raise ApplicationError(f'The container host provides cgroup v{cgroup_version}, but only cgroup v1 and v2 are supported.')
+
+ # Stop early for containers which require cgroup v2 when the container host does not provide it.
+ # None of the containers included with ansible-test currently use this configuration.
+ # Support for v2-only was added in preparation for the eventual removal of cgroup v1 support from systemd after EOY 2023.
+ # See: https://github.com/systemd/systemd/pull/24086
+ if self.config.cgroup == CGroupVersion.V2_ONLY and cgroup_version != 2:
+ raise ApplicationError(f'Container {self.config.name} requires cgroup v2 but the container host provides cgroup v{cgroup_version}.')
+
+ # Containers which use old versions of systemd (earlier than version 226) require cgroup v1 support.
+ # If the host is a cgroup v2 (unified) host, changes must be made to how the container is run.
+ #
+ # See: https://github.com/systemd/systemd/blob/main/NEWS
+ # Under the "CHANGES WITH 226" section:
+ # > systemd now optionally supports the new Linux kernel "unified" control group hierarchy.
+ #
+ # NOTE: The container host must have the cgroup v1 mount already present.
+ # If the container is run rootless, the user it runs under must have permissions to the mount.
+ #
+ # The following commands can be used to make the mount available:
+ #
+ # mkdir /sys/fs/cgroup/systemd
+ # mount cgroup -t cgroup /sys/fs/cgroup/systemd -o none,name=systemd,xattr
+ # chown -R {user}:{group} /sys/fs/cgroup/systemd # only when rootless
+ #
+ # See: https://github.com/containers/crun/blob/main/crun.1.md#runocisystemdforce_cgroup_v1path
+ if self.config.cgroup == CGroupVersion.V1_ONLY or (self.config.cgroup != CGroupVersion.NONE and get_docker_info(self.args).cgroup_version == 1):
+ if (cgroup_v1 := detect_host_properties(self.args).cgroup_v1) != SystemdControlGroupV1Status.VALID:
+ if self.config.cgroup == CGroupVersion.V1_ONLY:
+ if get_docker_info(self.args).cgroup_version == 2:
+ reason = f'Container {self.config.name} requires cgroup v1, but the container host only provides cgroup v2.'
+ else:
+ reason = f'Container {self.config.name} requires cgroup v1, but the container host does not appear to be running systemd.'
+ else:
+ reason = 'The container host provides cgroup v1, but does not appear to be running systemd.'
+
+ reason += f'\n{cgroup_v1.value}'
+
+ raise ControlGroupError(self.args, reason) # cgroup probe reported invalid state
+
+ def setup(self) -> None:
+ """Perform out-of-band setup before delegation."""
+ bootstrapper = BootstrapDocker(
+ controller=self.controller,
+ python_versions=[self.python.version],
+ ssh_key=SshKey(self.args),
+ )
+
+ setup_sh = bootstrapper.get_script()
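+        # The bootstrap script begins with a shebang; strip the leading '#!' to recover the shell to run it with.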
+ shell = setup_sh.splitlines()[0][2:]
+
+ try:
+ docker_exec(self.args, self.container_name, [shell], data=setup_sh, capture=False)
+ except SubprocessError:
+ display.info(f'Checking container "{self.container_name}" logs...')
+ docker_logs(self.args, self.container_name)
+ raise
+
+ def deprovision(self) -> None:
+ """Deprovision the host after delegation has completed."""
+ container_exists = False
+
+ if self.container_name:
+ if self.args.docker_terminate == TerminateMode.ALWAYS or (self.args.docker_terminate == TerminateMode.SUCCESS and self.args.success):
+ docker_rm(self.args, self.container_name)
+ else:
+ container_exists = True
+
+ if self.cgroup_path:
+ if container_exists:
+ display.notice(f'Remember to run `{require_docker().command} rm -f {self.container_name}` when finished testing. '
+ f'Then run `{shlex.join(self.delete_systemd_cgroup_v1_command)}` on the container host.')
+ else:
+ self.delete_systemd_cgroup_v1()
+ elif container_exists:
+ display.notice(f'Remember to run `{require_docker().command} rm -f {self.container_name}` when finished testing.')
+
+ def wait(self) -> None:
+ """Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
+ if not self.controller:
+ con = self.get_controller_target_connections()[0]
+ last_error = ''
+
+ for dummy in range(1, 10):
+ try:
+ con.run(['id'], capture=True)
+ except SubprocessError as ex:
+ if 'Permission denied' in ex.message:
+ raise
+
+ last_error = str(ex)
+ time.sleep(1)
+ else:
+ return
+
+ display.info('Checking SSH debug output...')
+ display.info(last_error)
+
+ if not self.args.delegate and not self.args.host_path:
+ def callback() -> None:
+ """Callback to run during error display."""
+ self.on_target_failure() # when the controller is not delegated, report failures immediately
+ else:
+ callback = None
+
+ raise HostConnectionError(f'Timeout waiting for {self.config.name} container {self.container_name}.', callback)
+
+ def get_controller_target_connections(self) -> list[SshConnection]:
+ """Return SSH connection(s) for accessing the host as a target from the controller."""
+ containers = get_container_database(self.args)
+ access = containers.data[HostType.control]['__test_hosts__'][self.container_name]
+
+ host = access.host_ip
+ port = dict(access.port_map())[22]
+
+ settings = SshConnectionDetail(
+ name=self.config.name,
+ user='root',
+ host=host,
+ port=port,
+ identity_file=SshKey(self.args).key,
+ python_interpreter=self.python.path,
+ # CentOS 6 uses OpenSSH 5.3, making it incompatible with the default configuration of OpenSSH 8.8 and later clients.
+ # Since only CentOS 6 is affected, and it is only supported by ansible-core 2.12, support for RSA SHA-1 is simply hard-coded here.
+ # A substring is used to allow custom containers to work, not just the one provided with ansible-test.
+ enable_rsa_sha1='centos6' in self.config.image,
+ )
+
+ return [SshConnection(self.args, settings)]
+
+ def get_origin_controller_connection(self) -> DockerConnection:
+ """Return a connection for accessing the host as a controller from the origin."""
+ return DockerConnection(self.args, self.container_name)
+
+ def get_working_directory(self) -> str:
+ """Return the working directory for the host."""
+ return '/root'
+
+ def on_target_failure(self) -> None:
+ """Executed during failure handling if this profile is a target."""
+ display.info(f'Checking container "{self.container_name}" logs...')
+
+ try:
+ docker_logs(self.args, self.container_name)
+ except SubprocessError as ex:
+ display.error(str(ex))
+
+ if self.config.cgroup != CGroupVersion.NONE:
+ # Containers with cgroup support are assumed to be running systemd.
+ display.info(f'Checking container "{self.container_name}" systemd logs...')
+
+ try:
+ docker_exec(self.args, self.container_name, ['journalctl'], capture=False)
+ except SubprocessError as ex:
+ display.error(str(ex))
+
+ display.error(f'Connection to container "{self.container_name}" failed. See logs and original error above.')
+
+ def get_common_run_options(self) -> list[str]:
+ """Return a list of options needed to run the container."""
+ options = [
+ # These temporary mount points need to be created at run time when using Docker.
+ # They are automatically provided by Podman, but will be overridden by VOLUME instructions for the container, if they exist.
+ # If supporting containers with VOLUME instructions is not desired, these options could be limited to use with Docker.
+ # See: https://github.com/containers/podman/pull/1318
+ # Previously they were handled by the VOLUME instruction during container image creation.
+ # However, that approach creates anonymous volumes when running the container, which are then left behind after the container is deleted.
+ # These options eliminate the need for the VOLUME instruction, and override it if they are present.
+ # The mount options used are those typically found on Linux systems.
+ # Of special note is the "exec" option for "/tmp", which is required by ansible-test for path injection of executables using temporary directories.
+ '--tmpfs', '/tmp:exec',
+ '--tmpfs', '/run:exec',
+ '--tmpfs', '/run/lock', # some systemd containers require a separate tmpfs here, such as Ubuntu 20.04 and Ubuntu 22.04
+ ]
+
+ if self.config.privileged:
+ options.append('--privileged')
+
+ if self.config.memory:
+ options.extend([
+ f'--memory={self.config.memory}',
+ f'--memory-swap={self.config.memory}',
+ ])
+
+ if self.config.seccomp != 'default':
+ options.extend(['--security-opt', f'seccomp={self.config.seccomp}'])
+
+ docker_socket = '/var/run/docker.sock'
+
+ if get_docker_hostname() != 'localhost' or os.path.exists(docker_socket):
+ options.extend(['--volume', f'{docker_socket}:{docker_socket}'])
+
+ return options
+
+
+class NetworkInventoryProfile(HostProfile[NetworkInventoryConfig]):
+ """Host profile for a network inventory."""
+
+
+class NetworkRemoteProfile(RemoteProfile[NetworkRemoteConfig]):
+ """Host profile for a network remote instance."""
+ def wait(self) -> None:
+ """Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
+ self.wait_until_ready()
+
+ def get_inventory_variables(self) -> dict[str, t.Optional[t.Union[str, int]]]:
+ """Return inventory variables for accessing this host."""
+ core_ci = self.wait_for_instance()
+ connection = core_ci.connection
+
+ variables: dict[str, t.Optional[t.Union[str, int]]] = dict(
+ ansible_connection=self.config.connection,
+ ansible_pipelining='yes',
+ ansible_host=connection.hostname,
+ ansible_port=connection.port,
+ ansible_user=connection.username,
+ ansible_ssh_private_key_file=core_ci.ssh_key.key,
+ # VyOS 1.1.8 uses OpenSSH 5.5, making it incompatible with RSA SHA-256/512 used by Paramiko 2.9 and later.
+ # IOS CSR 1000V uses an ancient SSH server, making it incompatible with RSA SHA-256/512 used by Paramiko 2.9 and later.
+ # That means all network platforms currently offered by ansible-core-ci require support for RSA SHA-1, so it is simply hard-coded here.
+ # NOTE: This option only exists in ansible-core 2.14 and later. For older ansible-core versions, use of Paramiko 2.8.x or earlier is required.
+ # See: https://github.com/ansible/ansible/pull/78789
+ # See: https://github.com/ansible/ansible/pull/78842
+ ansible_paramiko_use_rsa_sha2_algorithms='no',
+ ansible_network_os=f'{self.config.collection}.{self.config.platform}' if self.config.collection else self.config.platform,
+ )
+
+ return variables
+
+ def wait_until_ready(self) -> None:
+ """Wait for the host to respond to an Ansible module request."""
+ core_ci = self.wait_for_instance()
+
+ if not isinstance(self.args, IntegrationConfig):
+ return # skip extended checks unless we're running integration tests
+
+ inventory = Inventory.create_single_host(sanitize_host_name(self.config.name), self.get_inventory_variables())
+ env = ansible_environment(self.args)
+ module_name = f'{self.config.collection + "." if self.config.collection else ""}{self.config.platform}_command'
+
+ with tempfile.NamedTemporaryFile() as inventory_file:
+ inventory.write(self.args, inventory_file.name)
+
+ cmd = ['ansible', '-m', module_name, '-a', 'commands=?', '-i', inventory_file.name, 'all']
+
+ for dummy in range(1, 90):
+ try:
+ intercept_python(self.args, self.args.controller_python, cmd, env, capture=True)
+ except SubprocessError as ex:
+ display.warning(str(ex))
+ time.sleep(10)
+ else:
+ return
+
+ raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
+
+ def get_controller_target_connections(self) -> list[SshConnection]:
+ """Return SSH connection(s) for accessing the host as a target from the controller."""
+ core_ci = self.wait_for_instance()
+
+ settings = SshConnectionDetail(
+ name=core_ci.name,
+ host=core_ci.connection.hostname,
+ port=core_ci.connection.port,
+ user=core_ci.connection.username,
+ identity_file=core_ci.ssh_key.key,
+ # VyOS 1.1.8 uses OpenSSH 5.5, making it incompatible with the default configuration of OpenSSH 8.8 and later clients.
+ # IOS CSR 1000V uses an ancient SSH server, making it incompatible with the default configuration of OpenSSH 8.8 and later clients.
+ # That means all network platforms currently offered by ansible-core-ci require support for RSA SHA-1, so it is simply hard-coded here.
+ enable_rsa_sha1=True,
+ )
+
+ return [SshConnection(self.args, settings)]
+
+
+class OriginProfile(ControllerHostProfile[OriginConfig]):
+ """Host profile for origin."""
+ def get_origin_controller_connection(self) -> LocalConnection:
+ """Return a connection for accessing the host as a controller from the origin."""
+ return LocalConnection(self.args)
+
+ def get_working_directory(self) -> str:
+ """Return the working directory for the host."""
+ return os.getcwd()
+
+
+class PosixRemoteProfile(ControllerHostProfile[PosixRemoteConfig], RemoteProfile[PosixRemoteConfig]):
+ """Host profile for a POSIX remote instance."""
+ def wait(self) -> None:
+ """Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
+ self.wait_until_ready()
+
+ def configure(self) -> None:
+ """Perform in-band configuration. Executed before delegation for the controller and after delegation for targets."""
+ # a target uses a single python version, but a controller may include additional versions for targets running on the controller
+ python_versions = [self.python.version] + [target.python.version for target in self.targets if isinstance(target, ControllerConfig)]
+ python_versions = sorted_versions(list(set(python_versions)))
+
+ core_ci = self.wait_for_instance()
+ pwd = self.wait_until_ready()
+
+ display.info(f'Remote working directory: {pwd}', verbosity=1)
+
+ bootstrapper = BootstrapRemote(
+ controller=self.controller,
+ platform=self.config.platform,
+ platform_version=self.config.version,
+ python_versions=python_versions,
+ ssh_key=core_ci.ssh_key,
+ )
+
+ setup_sh = bootstrapper.get_script()
+ shell = setup_sh.splitlines()[0][2:]  # strip the leading '#!' from the script's shebang to get the interpreter to invoke
+
+ ssh = self.get_origin_controller_connection()
+ ssh.run([shell], data=setup_sh, capture=False)
+
+ def get_ssh_connection(self) -> SshConnection:
+ """Return an SSH connection for accessing the host."""
+ core_ci = self.wait_for_instance()
+
+ settings = SshConnectionDetail(
+ name=core_ci.name,
+ user=core_ci.connection.username,
+ host=core_ci.connection.hostname,
+ port=core_ci.connection.port,
+ identity_file=core_ci.ssh_key.key,
+ python_interpreter=self.python.path,
+ )
+
+ if settings.user == 'root':
+ become: t.Optional[Become] = None
+ elif self.config.become:
+ become = SUPPORTED_BECOME_METHODS[self.config.become]()
+ else:
+ display.warning(f'Defaulting to "sudo" for platform "{self.config.platform}" become support.', unique=True)
+ become = Sudo()
+
+ return SshConnection(self.args, settings, become)
+
+ def wait_until_ready(self) -> str:
+ """Wait for instance to respond to SSH, returning the current working directory once connected."""
+ core_ci = self.wait_for_instance()
+
+ for dummy in range(1, 90):
+ try:
+ return self.get_working_directory()
+ except SubprocessError as ex:
+ # No "Permission denied" check is performed here.
+ # Unlike containers, with remote instances, user configuration isn't guaranteed to have been completed before SSH connections are attempted.
+ display.warning(str(ex))
+ time.sleep(10)
+
+ raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
+
+ def get_controller_target_connections(self) -> list[SshConnection]:
+ """Return SSH connection(s) for accessing the host as a target from the controller."""
+ return [self.get_ssh_connection()]
+
+ def get_origin_controller_connection(self) -> SshConnection:
+ """Return a connection for accessing the host as a controller from the origin."""
+ return self.get_ssh_connection()
+
+ def get_working_directory(self) -> str:
+ """Return the working directory for the host."""
+ if not self.pwd:
+ ssh = self.get_origin_controller_connection()
+ stdout = ssh.run(['pwd'], capture=True)[0]
+
+ if self.args.explain:
+ return '/pwd'
+
+ pwd = stdout.strip().splitlines()[-1]
+
+ if not pwd.startswith('/'):
+ raise Exception(f'Unexpected current working directory "{pwd}" from "pwd" command output:\n{stdout.strip()}')
+
+ self.pwd = pwd
+
+ return self.pwd
+
+ @property
+ def pwd(self) -> t.Optional[str]:
+ """Return the cached pwd, if any, otherwise None."""
+ return self.cache.get('pwd')
+
+ @pwd.setter
+ def pwd(self, value: str) -> None:
+ """Cache the given pwd."""
+ self.cache['pwd'] = value
+
+
+class PosixSshProfile(SshTargetHostProfile[PosixSshConfig], PosixProfile[PosixSshConfig]):
+ """Host profile for a POSIX SSH instance."""
+ def get_controller_target_connections(self) -> list[SshConnection]:
+ """Return SSH connection(s) for accessing the host as a target from the controller."""
+ settings = SshConnectionDetail(
+ name='target',
+ user=self.config.user,
+ host=self.config.host,
+ port=self.config.port,
+ identity_file=SshKey(self.args).key,
+ python_interpreter=self.python.path,
+ )
+
+ return [SshConnection(self.args, settings)]
+
+
+class WindowsInventoryProfile(SshTargetHostProfile[WindowsInventoryConfig]):
+ """Host profile for a Windows inventory."""
+ def get_controller_target_connections(self) -> list[SshConnection]:
+ """Return SSH connection(s) for accessing the host as a target from the controller."""
+ inventory = parse_inventory(self.args, self.config.path)
+ hosts = get_hosts(inventory, 'windows')
+ identity_file = SshKey(self.args).key
+
+ settings = [SshConnectionDetail(
+ name=name,
+ host=config['ansible_host'],
+ port=22,
+ user=config['ansible_user'],
+ identity_file=identity_file,
+ shell_type='powershell',
+ ) for name, config in hosts.items()]
+
+ if settings:
+ details = '\n'.join(f'{ssh.name} {ssh.user}@{ssh.host}:{ssh.port}' for ssh in settings)
+ display.info(f'Generated SSH connection details from inventory:\n{details}', verbosity=1)
+
+ return [SshConnection(self.args, setting) for setting in settings]
+
+
+class WindowsRemoteProfile(RemoteProfile[WindowsRemoteConfig]):
+ """Host profile for a Windows remote instance."""
+ def wait(self) -> None:
+ """Wait for the instance to be ready. Executed before delegation for the controller and after delegation for targets."""
+ self.wait_until_ready()
+
+ def get_inventory_variables(self) -> dict[str, t.Optional[t.Union[str, int]]]:
+ """Return inventory variables for accessing this host."""
+ core_ci = self.wait_for_instance()
+ connection = core_ci.connection
+
+ variables: dict[str, t.Optional[t.Union[str, int]]] = dict(
+ ansible_connection='winrm',
+ ansible_pipelining='yes',
+ ansible_winrm_server_cert_validation='ignore',
+ ansible_host=connection.hostname,
+ ansible_port=connection.port,
+ ansible_user=connection.username,
+ ansible_password=connection.password,
+ ansible_ssh_private_key_file=core_ci.ssh_key.key,
+ )
+
+ # HACK: force 2016 to use NTLM + HTTP message encryption
+ if self.config.version == '2016':
+ variables.update(
+ ansible_winrm_transport='ntlm',
+ ansible_winrm_scheme='http',
+ ansible_port='5985',
+ )
+
+ return variables
+
+ def wait_until_ready(self) -> None:
+ """Wait for the host to respond to an Ansible module request."""
+ core_ci = self.wait_for_instance()
+
+ if not isinstance(self.args, IntegrationConfig):
+ return # skip extended checks unless we're running integration tests
+
+ inventory = Inventory.create_single_host(sanitize_host_name(self.config.name), self.get_inventory_variables())
+ env = ansible_environment(self.args)
+ module_name = 'ansible.windows.win_ping'
+
+ with tempfile.NamedTemporaryFile() as inventory_file:
+ inventory.write(self.args, inventory_file.name)
+
+ cmd = ['ansible', '-m', module_name, '-i', inventory_file.name, 'all']
+
+ for dummy in range(1, 120):
+ try:
+ intercept_python(self.args, self.args.controller_python, cmd, env, capture=True)
+ except SubprocessError as ex:
+ display.warning(str(ex))
+ time.sleep(10)
+ else:
+ return
+
+ raise HostConnectionError(f'Timeout waiting for {self.config.name} instance {core_ci.instance_id}.')
+
+ def get_controller_target_connections(self) -> list[SshConnection]:
+ """Return SSH connection(s) for accessing the host as a target from the controller."""
+ core_ci = self.wait_for_instance()
+
+ settings = SshConnectionDetail(
+ name=core_ci.name,
+ host=core_ci.connection.hostname,
+ port=22,
+ user=core_ci.connection.username,
+ identity_file=core_ci.ssh_key.key,
+ shell_type='powershell',
+ )
+
+ return [SshConnection(self.args, settings)]
+
+
+@cache
+def get_config_profile_type_map() -> dict[t.Type[HostConfig], t.Type[HostProfile]]:
+ """Create and return a mapping of HostConfig types to HostProfile types."""
+ return get_type_map(HostProfile, HostConfig)
+
+
+def create_host_profile(
+ args: EnvironmentConfig,
+ config: HostConfig,
+ controller: bool,
+) -> HostProfile:
+ """Create and return a host profile from the given host configuration."""
+ profile_type = get_config_profile_type_map()[type(config)]
+ profile = profile_type(args=args, config=config, targets=args.targets if controller else None)
+ return profile
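+
+
+# A minimal usage sketch (illustrative only): given an EnvironmentConfig `args` and one of its
+# host configurations, the factory selects the matching profile type via get_config_profile_type_map().
+#
+#   profile = create_host_profile(args, args.targets[0], controller=False)
+#   profile.setup()        # out-of-band setup before delegation
+#   profile.wait()         # block until the host is reachable
+#   profile.deprovision()  # clean up once testing has completed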
diff --git a/test/lib/ansible_test/_internal/http.py b/test/lib/ansible_test/_internal/http.py
new file mode 100644
index 0000000..ca51447
--- /dev/null
+++ b/test/lib/ansible_test/_internal/http.py
@@ -0,0 +1,134 @@
+"""
+Primitive replacement for requests to avoid extra dependency.
+Avoids use of urllib2 due to lack of SNI support.
+"""
+from __future__ import annotations
+
+import json
+import time
+import typing as t
+
+from .util import (
+ ApplicationError,
+ SubprocessError,
+ display,
+)
+
+from .util_common import (
+ CommonConfig,
+ run_command,
+)
+
+
+class HttpClient:
+ """Make HTTP requests via curl."""
+ def __init__(self, args: CommonConfig, always: bool = False, insecure: bool = False, proxy: t.Optional[str] = None) -> None:
+ self.args = args
+ self.always = always
+ self.insecure = insecure
+ self.proxy = proxy
+
+ self.username = None
+ self.password = None
+
+ def get(self, url: str) -> HttpResponse:
+ """Perform an HTTP GET and return the response."""
+ return self.request('GET', url)
+
+ def delete(self, url: str) -> HttpResponse:
+ """Perform an HTTP DELETE and return the response."""
+ return self.request('DELETE', url)
+
+ def put(self, url: str, data: t.Optional[str] = None, headers: t.Optional[dict[str, str]] = None) -> HttpResponse:
+ """Perform an HTTP PUT and return the response."""
+ return self.request('PUT', url, data, headers)
+
+ def request(self, method: str, url: str, data: t.Optional[str] = None, headers: t.Optional[dict[str, str]] = None) -> HttpResponse:
+ """Perform an HTTP request and return the response."""
+ cmd = ['curl', '-s', '-S', '-i', '-X', method]
+
+ if self.insecure:
+ cmd += ['--insecure']
+
+ if headers is None:
+ headers = {}
+
+ headers['Expect'] = '' # don't send expect continue header
+
+ if self.username:
+ if self.password:
+ display.sensitive.add(self.password)
+ cmd += ['-u', '%s:%s' % (self.username, self.password)]
+ else:
+ cmd += ['-u', self.username]
+
+ for header, value in headers.items():
+ cmd += ['-H', '%s: %s' % (header, value)]
+
+ if data is not None:
+ cmd += ['-d', data]
+
+ if self.proxy:
+ cmd += ['-x', self.proxy]
+
+ cmd += [url]
+
+ attempts = 0
+ max_attempts = 3
+ sleep_seconds = 3
+
+ # curl error codes which are safe to retry (request never sent to server)
+ retry_on_status = (
+ 6, # CURLE_COULDNT_RESOLVE_HOST
+ )
+
+ stdout = ''
+
+ while True:
+ attempts += 1
+
+ try:
+ stdout = run_command(self.args, cmd, capture=True, always=self.always, cmd_verbosity=2)[0]
+ break
+ except SubprocessError as ex:
+ if ex.status in retry_on_status and attempts < max_attempts:
+ display.warning('%s' % ex)
+ time.sleep(sleep_seconds)
+ continue
+
+ raise
+
+ if self.args.explain and not self.always:
+ return HttpResponse(method, url, 200, '')
+
+ header, body = stdout.split('\r\n\r\n', 1)
+
+ response_headers = header.split('\r\n')
+ first_line = response_headers[0]
+ http_response = first_line.split(' ')
+ status_code = int(http_response[1])
+
+ return HttpResponse(method, url, status_code, body)
+
+
+class HttpResponse:
+ """HTTP response from curl."""
+ def __init__(self, method: str, url: str, status_code: int, response: str) -> None:
+ self.method = method
+ self.url = url
+ self.status_code = status_code
+ self.response = response
+
+ def json(self) -> t.Any:
+ """Return the response parsed as JSON, raising an exception if parsing fails."""
+ try:
+ return json.loads(self.response)
+ except ValueError:
+ raise HttpError(self.status_code, 'Cannot parse response to %s %s as JSON:\n%s' % (self.method, self.url, self.response))
+
+
+class HttpError(ApplicationError):
+ """HTTP response as an error."""
+ def __init__(self, status: int, message: str) -> None:
+ super().__init__('%s: %s' % (status, message))
+ self.status = status
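+
+
+# A minimal usage sketch (illustrative only), assuming an existing CommonConfig
+# instance named `args`:
+#
+#   client = HttpClient(args, insecure=True)
+#   response = client.get('https://example.com/api/info')
+#   if response.status_code == 200:
+#       data = response.json()  # raises HttpError if the body is not valid JSON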
diff --git a/test/lib/ansible_test/_internal/init.py b/test/lib/ansible_test/_internal/init.py
new file mode 100644
index 0000000..863c258
--- /dev/null
+++ b/test/lib/ansible_test/_internal/init.py
@@ -0,0 +1,15 @@
+"""Early initialization for ansible-test before most other imports have been performed."""
+from __future__ import annotations
+
+import resource
+
+from .constants import (
+ SOFT_RLIMIT_NOFILE,
+)
+
+CURRENT_RLIMIT_NOFILE = resource.getrlimit(resource.RLIMIT_NOFILE)
+DESIRED_RLIMIT_NOFILE = (SOFT_RLIMIT_NOFILE, CURRENT_RLIMIT_NOFILE[1])
+
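+# Tuples compare element-wise, so with equal hard limits this only triggers when the
+# desired soft limit is below the current one; the limit is lowered, never raised.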
+if DESIRED_RLIMIT_NOFILE < CURRENT_RLIMIT_NOFILE:
+ resource.setrlimit(resource.RLIMIT_NOFILE, DESIRED_RLIMIT_NOFILE)
+ CURRENT_RLIMIT_NOFILE = DESIRED_RLIMIT_NOFILE
diff --git a/test/lib/ansible_test/_internal/inventory.py b/test/lib/ansible_test/_internal/inventory.py
new file mode 100644
index 0000000..6abf9ed
--- /dev/null
+++ b/test/lib/ansible_test/_internal/inventory.py
@@ -0,0 +1,175 @@
+"""Inventory creation from host profiles."""
+from __future__ import annotations
+
+import shutil
+import typing as t
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .util import (
+ sanitize_host_name,
+ exclude_none_values,
+)
+
+from .host_profiles import (
+ ControllerHostProfile,
+ ControllerProfile,
+ HostProfile,
+ Inventory,
+ NetworkInventoryProfile,
+ NetworkRemoteProfile,
+ SshTargetHostProfile,
+ WindowsInventoryProfile,
+ WindowsRemoteProfile,
+)
+
+from .ssh import (
+ ssh_options_to_str,
+)
+
+
+def create_controller_inventory(args: EnvironmentConfig, path: str, controller_host: ControllerHostProfile) -> None:
+ """Create and return inventory for use in controller-only integration tests."""
+ inventory = Inventory(
+ host_groups=dict(
+ testgroup=dict(
+ testhost=dict(
+ ansible_connection='local',
+ ansible_pipelining='yes',
+ ansible_python_interpreter=controller_host.python.path,
+ ),
+ ),
+ ),
+ )
+
+ inventory.write(args, path)
+
+
+def create_windows_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile]) -> None:
+ """Create and return inventory for use in target Windows integration tests."""
+ first = target_hosts[0]
+
+ if isinstance(first, WindowsInventoryProfile):
+ if args.explain:
+ return
+
+ try:
+ shutil.copyfile(first.config.path, path)
+ except shutil.SameFileError:
+ pass
+
+ return
+
+ target_hosts = t.cast(list[WindowsRemoteProfile], target_hosts)
+ hosts = [(target_host, target_host.wait_for_instance().connection) for target_host in target_hosts]
+ windows_hosts = {sanitize_host_name(host.config.name): host.get_inventory_variables() for host, connection in hosts}
+
+ inventory = Inventory(
+ host_groups=dict(
+ windows=windows_hosts,
+ ),
+ # The `testhost` group is needed to support the `binary_modules_winrm` integration test.
+ # The test should be updated to remove the need for this.
+ extra_groups={
+ 'testhost:children': [
+ 'windows',
+ ],
+ },
+ )
+
+ inventory.write(args, path)
+
+
+def create_network_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile]) -> None:
+ """Create and return inventory for use in target network integration tests."""
+ first = target_hosts[0]
+
+ if isinstance(first, NetworkInventoryProfile):
+ if args.explain:
+ return
+
+ try:
+ shutil.copyfile(first.config.path, path)
+ except shutil.SameFileError:
+ pass
+
+ return
+
+ target_hosts = t.cast(list[NetworkRemoteProfile], target_hosts)
+ host_groups: dict[str, dict[str, dict[str, t.Union[str, int]]]] = {target_host.config.platform: {} for target_host in target_hosts}
+
+ for target_host in target_hosts:
+ host_groups[target_host.config.platform][sanitize_host_name(target_host.config.name)] = target_host.get_inventory_variables()
+
+ inventory = Inventory(
+ host_groups=host_groups,
+ # The `net` group was added to support platform-agnostic testing. It may no longer be needed.
+ # see: https://github.com/ansible/ansible/pull/34661
+ # see: https://github.com/ansible/ansible/pull/34707
+ extra_groups={
+ 'net:children': sorted(host_groups),
+ },
+ )
+
+ inventory.write(args, path)
+
+
+def create_posix_inventory(args: EnvironmentConfig, path: str, target_hosts: list[HostProfile], needs_ssh: bool = False) -> None:
+ """Create and return inventory for use in POSIX integration tests."""
+ target_hosts = t.cast(list[SshTargetHostProfile], target_hosts)
+
+ if len(target_hosts) != 1:
+ raise Exception(f'Exactly one POSIX target host is required, but {len(target_hosts)} were found.')
+
+ target_host = target_hosts[0]
+
+ if isinstance(target_host, ControllerProfile) and not needs_ssh:
+ inventory = Inventory(
+ host_groups=dict(
+ testgroup=dict(
+ testhost=dict(
+ ansible_connection='local',
+ ansible_pipelining='yes',
+ ansible_python_interpreter=target_host.python.path,
+ ),
+ ),
+ ),
+ )
+ else:
+ connections = target_host.get_controller_target_connections()
+
+ if len(connections) != 1:
+ raise Exception(f'Expected exactly one SSH connection for the target host, but found {len(connections)}.')
+
+ ssh = connections[0]
+
+ testhost: dict[str, t.Optional[t.Union[str, int]]] = dict(
+ ansible_connection='ssh',
+ ansible_pipelining='yes',
+ ansible_python_interpreter=ssh.settings.python_interpreter,
+ ansible_host=ssh.settings.host,
+ ansible_port=ssh.settings.port,
+ ansible_user=ssh.settings.user,
+ ansible_ssh_private_key_file=ssh.settings.identity_file,
+ ansible_ssh_extra_args=ssh_options_to_str(ssh.settings.options),
+ )
+
+ if ssh.become:
+ testhost.update(
+ ansible_become='yes',
+ ansible_become_method=ssh.become.method,
+ )
+
+ testhost = exclude_none_values(testhost)
+
+ inventory = Inventory(
+ host_groups=dict(
+ testgroup=dict(
+ testhost=testhost,
+ ),
+ ),
+ )
+
+ inventory.write(args, path)
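+
+
+# For the controller-only branch above, the generated inventory is roughly equivalent to the
+# following INI content (a sketch; the exact rendering is handled by Inventory.write):
+#
+#   [testgroup]
+#   testhost ansible_connection=local ansible_pipelining=yes ansible_python_interpreter=/usr/bin/python3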
diff --git a/test/lib/ansible_test/_internal/io.py b/test/lib/ansible_test/_internal/io.py
new file mode 100644
index 0000000..80d4769
--- /dev/null
+++ b/test/lib/ansible_test/_internal/io.py
@@ -0,0 +1,88 @@
+"""Functions for disk IO."""
+from __future__ import annotations
+
+import io
+import json
+import os
+import typing as t
+
+from .encoding import (
+ ENCODING,
+ to_bytes,
+ to_text,
+)
+
+
+def read_json_file(path: str) -> t.Any:
+ """Parse and return the json content from the specified path."""
+ return json.loads(read_text_file(path))
+
+
+def read_text_file(path: str) -> str:
+ """Return the contents of the specified path as text."""
+ return to_text(read_binary_file(path))
+
+
+def read_binary_file(path: str) -> bytes:
+ """Return the contents of the specified path as bytes."""
+ with open_binary_file(path) as file_obj:
+ return file_obj.read()
+
+
+def make_dirs(path: str) -> None:
+ """Create a directory at path, including any necessary parent directories."""
+ os.makedirs(to_bytes(path), exist_ok=True)
+
+
+def write_json_file(path: str,
+ content: t.Any,
+ create_directories: bool = False,
+ formatted: bool = True,
+ encoder: t.Optional[t.Type[json.JSONEncoder]] = None,
+ ) -> str:
+ """Write the given json content to the specified path, optionally creating missing directories."""
+ text_content = json.dumps(content,
+ sort_keys=formatted,
+ indent=4 if formatted else None,
+ separators=(', ', ': ') if formatted else (',', ':'),
+ cls=encoder,
+ ) + '\n'
+
+ write_text_file(path, text_content, create_directories=create_directories)
+
+ return text_content
+
+
+def write_text_file(path: str, content: str, create_directories: bool = False) -> None:
+ """Write the given text content to the specified path, optionally creating missing directories."""
+ if create_directories:
+ make_dirs(os.path.dirname(path))
+
+ with open_binary_file(path, 'wb') as file_obj:
+ file_obj.write(to_bytes(content))
+
+
+def open_text_file(path: str, mode: str = 'r') -> t.IO[str]:
+ """Open the given path for text access."""
+ if 'b' in mode:
+ raise Exception('mode cannot include "b" for text files: %s' % mode)
+
+ return io.open(to_bytes(path), mode, encoding=ENCODING) # pylint: disable=consider-using-with
+
+
+def open_binary_file(path: str, mode: str = 'rb') -> t.IO[bytes]:
+ """Open the given path for binary access."""
+ if 'b' not in mode:
+ raise Exception('mode must include "b" for binary files: %s' % mode)
+
+ return io.open(to_bytes(path), mode) # pylint: disable=consider-using-with
+
+
+class SortedSetEncoder(json.JSONEncoder):
+ """Encode sets as sorted lists."""
+ def default(self, o: t.Any) -> t.Any:
+ """Return a serialized version of the `o` object."""
+ if isinstance(o, set):
+ return sorted(o)
+
+ return json.JSONEncoder.default(self, o)
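+
+
+# A small usage sketch (illustrative only): passing the encoder through write_json_file
+# makes set serialization deterministic.
+#
+#   write_json_file('/tmp/example.json', {'tags': {'b', 'a', 'c'}}, encoder=SortedSetEncoder)
+#   # the "tags" value is written as the sorted list ["a", "b", "c"]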
diff --git a/test/lib/ansible_test/_internal/junit_xml.py b/test/lib/ansible_test/_internal/junit_xml.py
new file mode 100644
index 0000000..3b95867
--- /dev/null
+++ b/test/lib/ansible_test/_internal/junit_xml.py
@@ -0,0 +1,267 @@
+"""
+Dataclasses for creating JUnit XML files.
+See: https://github.com/junit-team/junit5/blob/main/platform-tests/src/test/resources/jenkins-junit.xsd
+"""
+from __future__ import annotations
+
+import abc
+import dataclasses
+import datetime
+import decimal
+
+from xml.dom import minidom
+# noinspection PyPep8Naming
+from xml.etree import ElementTree as ET
+
+
+@dataclasses.dataclass # type: ignore[misc] # https://github.com/python/mypy/issues/5374
+class TestResult(metaclass=abc.ABCMeta):
+ """Base class for the result of a test case."""
+ output: str | None = None
+ message: str | None = None
+ type: str | None = None
+
+ def __post_init__(self):
+ if self.type is None:
+ self.type = self.tag
+
+ @property
+ @abc.abstractmethod
+ def tag(self) -> str:
+ """Tag name for the XML element created by this result type."""
+
+ def get_attributes(self) -> dict[str, str]:
+ """Return a dictionary of attributes for this instance."""
+ return _attributes(
+ message=self.message,
+ type=self.type,
+ )
+
+ def get_xml_element(self) -> ET.Element:
+ """Return an XML element representing this instance."""
+ element = ET.Element(self.tag, self.get_attributes())
+ element.text = self.output
+
+ return element
+
+
+@dataclasses.dataclass
+class TestFailure(TestResult):
+ """Failure info for a test case."""
+ @property
+ def tag(self) -> str:
+ """Tag name for the XML element created by this result type."""
+ return 'failure'
+
+
+@dataclasses.dataclass
+class TestError(TestResult):
+ """Error info for a test case."""
+ @property
+ def tag(self) -> str:
+ """Tag name for the XML element created by this result type."""
+ return 'error'
+
+
+@dataclasses.dataclass
+class TestCase:
+ """An individual test case."""
+ name: str
+ assertions: int | None = None
+ classname: str | None = None
+ status: str | None = None
+ time: decimal.Decimal | None = None
+
+ errors: list[TestError] = dataclasses.field(default_factory=list)
+ failures: list[TestFailure] = dataclasses.field(default_factory=list)
+ skipped: str | None = None
+ system_out: str | None = None
+ system_err: str | None = None
+
+ is_disabled: bool = False
+
+ @property
+ def is_failure(self) -> bool:
+ """True if the test case contains failure info."""
+ return bool(self.failures)
+
+ @property
+ def is_error(self) -> bool:
+ """True if the test case contains error info."""
+ return bool(self.errors)
+
+ @property
+ def is_skipped(self) -> bool:
+ """True if the test case was skipped."""
+ return bool(self.skipped)
+
+ def get_attributes(self) -> dict[str, str]:
+ """Return a dictionary of attributes for this instance."""
+ return _attributes(
+ assertions=self.assertions,
+ classname=self.classname,
+ name=self.name,
+ status=self.status,
+ time=self.time,
+ )
+
+ def get_xml_element(self) -> ET.Element:
+ """Return an XML element representing this instance."""
+ element = ET.Element('testcase', self.get_attributes())
+
+ if self.skipped:
+ ET.SubElement(element, 'skipped').text = self.skipped
+
+ element.extend([error.get_xml_element() for error in self.errors])
+ element.extend([failure.get_xml_element() for failure in self.failures])
+
+ if self.system_out:
+ ET.SubElement(element, 'system-out').text = self.system_out
+
+ if self.system_err:
+ ET.SubElement(element, 'system-err').text = self.system_err
+
+ return element
+
+
+@dataclasses.dataclass
+class TestSuite:
+ """A collection of test cases."""
+ name: str
+ hostname: str | None = None
+ id: str | None = None
+ package: str | None = None
+ timestamp: datetime.datetime | None = None
+
+ properties: dict[str, str] = dataclasses.field(default_factory=dict)
+ cases: list[TestCase] = dataclasses.field(default_factory=list)
+ system_out: str | None = None
+ system_err: str | None = None
+
+ @property
+ def disabled(self) -> int:
+ """The number of disabled test cases."""
+ return sum(case.is_disabled for case in self.cases)
+
+ @property
+ def errors(self) -> int:
+ """The number of test cases containing error info."""
+ return sum(case.is_error for case in self.cases)
+
+ @property
+ def failures(self) -> int:
+ """The number of test cases containing failure info."""
+ return sum(case.is_failure for case in self.cases)
+
+ @property
+ def skipped(self) -> int:
+ """The number of test cases containing skipped info."""
+ return sum(case.is_skipped for case in self.cases)
+
+ @property
+ def tests(self) -> int:
+ """The number of test cases."""
+ return len(self.cases)
+
+ @property
+ def time(self) -> decimal.Decimal:
+ """The total time from all test cases."""
+ return decimal.Decimal(sum(case.time for case in self.cases if case.time))
+
+ def get_attributes(self) -> dict[str, str]:
+ """Return a dictionary of attributes for this instance."""
+ return _attributes(
+ disabled=self.disabled,
+ errors=self.errors,
+ failures=self.failures,
+ hostname=self.hostname,
+ id=self.id,
+ name=self.name,
+ package=self.package,
+ skipped=self.skipped,
+ tests=self.tests,
+ time=self.time,
+ timestamp=self.timestamp.isoformat(timespec='seconds') if self.timestamp else None,
+ )
+
+ def get_xml_element(self) -> ET.Element:
+ """Return an XML element representing this instance."""
+ element = ET.Element('testsuite', self.get_attributes())
+
+ if self.properties:
+ ET.SubElement(element, 'properties').extend([ET.Element('property', dict(name=name, value=value)) for name, value in self.properties.items()])
+
+ element.extend([test_case.get_xml_element() for test_case in self.cases])
+
+ if self.system_out:
+ ET.SubElement(element, 'system-out').text = self.system_out
+
+ if self.system_err:
+ ET.SubElement(element, 'system-err').text = self.system_err
+
+ return element
+
+
+@dataclasses.dataclass
+class TestSuites:
+ """A collection of test suites."""
+ name: str | None = None
+
+ suites: list[TestSuite] = dataclasses.field(default_factory=list)
+
+ @property
+ def disabled(self) -> int:
+ """The number of disabled test cases."""
+ return sum(suite.disabled for suite in self.suites)
+
+ @property
+ def errors(self) -> int:
+ """The number of test cases containing error info."""
+ return sum(suite.errors for suite in self.suites)
+
+ @property
+ def failures(self) -> int:
+ """The number of test cases containing failure info."""
+ return sum(suite.failures for suite in self.suites)
+
+ @property
+ def tests(self) -> int:
+ """The number of test cases."""
+ return sum(suite.tests for suite in self.suites)
+
+ @property
+ def time(self) -> decimal.Decimal:
+ """The total time from all test cases."""
+ return decimal.Decimal(sum(suite.time for suite in self.suites))
+
+ def get_attributes(self) -> dict[str, str]:
+ """Return a dictionary of attributes for this instance."""
+ return _attributes(
+ disabled=self.disabled,
+ errors=self.errors,
+ failures=self.failures,
+ name=self.name,
+ tests=self.tests,
+ time=self.time,
+ )
+
+ def get_xml_element(self) -> ET.Element:
+ """Return an XML element representing this instance."""
+ element = ET.Element('testsuites', self.get_attributes())
+ element.extend([suite.get_xml_element() for suite in self.suites])
+
+ return element
+
+ def to_pretty_xml(self) -> str:
+ """Return a pretty formatted XML string representing this instance."""
+ return _pretty_xml(self.get_xml_element())
+
+
+def _attributes(**kwargs) -> dict[str, str]:
+ """Return the given kwargs as a dictionary with values converted to strings. Items with a value of None will be omitted."""
+ return {key: str(value) for key, value in kwargs.items() if value is not None}
+
+
+def _pretty_xml(element: ET.Element) -> str:
+ """Return a pretty formatted XML string representing the given element."""
+ return minidom.parseString(ET.tostring(element, encoding='unicode')).toprettyxml()
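+
+
+# A minimal end-to-end sketch (illustrative only):
+#
+#   case = TestCase(name='test_example', classname='example')
+#   case.failures.append(TestFailure(message='assertion failed', output='details here'))
+#   suite = TestSuite(name='units', cases=[case])
+#   xml = TestSuites(name='ansible-test', suites=[suite]).to_pretty_xml()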
diff --git a/test/lib/ansible_test/_internal/locale_util.py b/test/lib/ansible_test/_internal/locale_util.py
new file mode 100644
index 0000000..3fb74ad
--- /dev/null
+++ b/test/lib/ansible_test/_internal/locale_util.py
@@ -0,0 +1,61 @@
+"""Initialize locale settings. This must be imported very early in ansible-test startup."""
+
+from __future__ import annotations
+
+import locale
+import sys
+import typing as t
+
+STANDARD_LOCALE = 'en_US.UTF-8'
+"""
+The standard locale used by ansible-test and its subprocesses and delegated instances.
+"""
+
+FALLBACK_LOCALE = 'C.UTF-8'
+"""
+The fallback locale to use when the standard locale is not available.
+This was added in ansible-core 2.14 to allow testing in environments without the standard locale.
+It was not needed in previous ansible-core releases since they do not verify the locale during startup.
+"""
+
+
+class LocaleError(SystemExit):
+ """Exception to raise when locale related errors occur."""
+ def __init__(self, message: str) -> None:
+ super().__init__(f'ERROR: {message}')
+
+
+def configure_locale() -> tuple[str, t.Optional[str]]:
+ """Configure the locale, returning the selected locale and an optional warning."""
+
+ if (fs_encoding := sys.getfilesystemencoding()).lower() != 'utf-8':
+ raise LocaleError(f'ansible-test requires the filesystem encoding to be UTF-8, but "{fs_encoding}" was detected.')
+
+ candidate_locales = STANDARD_LOCALE, FALLBACK_LOCALE
+
+ errors: dict[str, str] = {}
+ warning: t.Optional[str] = None
+ configured_locale: t.Optional[str] = None
+
+ for candidate_locale in candidate_locales:
+ try:
+ locale.setlocale(locale.LC_ALL, candidate_locale)
+ locale.getlocale()
+ except (locale.Error, ValueError) as ex:
+ errors[candidate_locale] = str(ex)
+ else:
+ configured_locale = candidate_locale
+ break
+
+ if not configured_locale:
+ raise LocaleError('ansible-test could not initialize a supported locale:\n' +
+ '\n'.join(f'{key}: {value}' for key, value in errors.items()))
+
+ if configured_locale != STANDARD_LOCALE:
+ warning = (f'Using locale "{configured_locale}" instead of "{STANDARD_LOCALE}". '
+ 'Tests which depend on the locale may behave unexpectedly.')
+
+ return configured_locale, warning
+
+
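+# Configured at import time so the locale is in effect before the rest of ansible-test runs;
+# LOCALE_WARNING is returned as data so callers can report it once output handling is available.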
+CONFIGURED_LOCALE, LOCALE_WARNING = configure_locale()
diff --git a/test/lib/ansible_test/_internal/metadata.py b/test/lib/ansible_test/_internal/metadata.py
new file mode 100644
index 0000000..94bbc34
--- /dev/null
+++ b/test/lib/ansible_test/_internal/metadata.py
@@ -0,0 +1,125 @@
+"""Test metadata for passing data to delegated tests."""
+from __future__ import annotations
+import typing as t
+
+from .util import (
+ display,
+)
+
+from .io import (
+ write_json_file,
+ read_json_file,
+)
+
+from .diff import (
+ parse_diff,
+ FileDiff,
+)
+
+
+class Metadata:
+ """Metadata object for passing data to delegated tests."""
+ def __init__(self) -> None:
+ """Initialize metadata."""
+ self.changes: dict[str, tuple[tuple[int, int], ...]] = {}
+ self.cloud_config: t.Optional[dict[str, dict[str, t.Union[int, str, bool]]]] = None
+ self.change_description: t.Optional[ChangeDescription] = None
+ self.ci_provider: t.Optional[str] = None
+
+ def populate_changes(self, diff: t.Optional[list[str]]) -> None:
+ """Populate the changeset using the given diff."""
+ patches: list[FileDiff] = sorted(parse_diff(diff), key=lambda k: k.new.path)
+
+ self.changes = dict((patch.new.path, tuple(patch.new.ranges)) for patch in patches)
+
+ renames = [patch.old.path for patch in patches if patch.old.path != patch.new.path and patch.old.exists and patch.new.exists]
+ deletes = [patch.old.path for patch in patches if not patch.new.exists]
+
+ # make sure old paths which were renamed or deleted are registered in changes
+ for path in renames + deletes:
+ if path in self.changes:
+ # old path was replaced with another file
+ continue
+
+ # failed tests involving deleted files should be using line 0 since there is no content remaining
+ self.changes[path] = ((0, 0),)
+
+ def to_dict(self) -> dict[str, t.Any]:
+ """Return a dictionary representation of the metadata."""
+ return dict(
+ changes=self.changes,
+ cloud_config=self.cloud_config,
+ ci_provider=self.ci_provider,
+ change_description=self.change_description.to_dict(),
+ )
+
+ def to_file(self, path: str) -> None:
+ """Write the metadata to the specified file."""
+ data = self.to_dict()
+
+ display.info('>>> Metadata: %s\n%s' % (path, data), verbosity=3)
+
+ write_json_file(path, data)
+
+ @staticmethod
+ def from_file(path: str) -> Metadata:
+ """Return metadata loaded from the specified file."""
+ data = read_json_file(path)
+ return Metadata.from_dict(data)
+
+ @staticmethod
+ def from_dict(data: dict[str, t.Any]) -> Metadata:
+ """Return metadata loaded from the specified dictionary."""
+ metadata = Metadata()
+ metadata.changes = data['changes']
+ metadata.cloud_config = data['cloud_config']
+ metadata.ci_provider = data['ci_provider']
+ metadata.change_description = ChangeDescription.from_dict(data['change_description'])
+
+ return metadata
+
+
+class ChangeDescription:
+ """Description of changes."""
+ def __init__(self) -> None:
+ self.command: str = ''
+ self.changed_paths: list[str] = []
+ self.deleted_paths: list[str] = []
+ self.regular_command_targets: dict[str, list[str]] = {}
+ self.focused_command_targets: dict[str, list[str]] = {}
+ self.no_integration_paths: list[str] = []
+
+ @property
+ def targets(self) -> t.Optional[list[str]]:
+ """Optional list of target names."""
+ return self.regular_command_targets.get(self.command)
+
+ @property
+ def focused_targets(self) -> t.Optional[list[str]]:
+ """Optional list of focused target names."""
+ return self.focused_command_targets.get(self.command)
+
+ def to_dict(self) -> dict[str, t.Any]:
+ """Return a dictionary representation of the change description."""
+ return dict(
+ command=self.command,
+ changed_paths=self.changed_paths,
+ deleted_paths=self.deleted_paths,
+ regular_command_targets=self.regular_command_targets,
+ focused_command_targets=self.focused_command_targets,
+ no_integration_paths=self.no_integration_paths,
+ )
+
+ @staticmethod
+ def from_dict(data: dict[str, t.Any]) -> ChangeDescription:
+ """Return a change description loaded from the given dictionary."""
+ changes = ChangeDescription()
+ changes.command = data['command']
+ changes.changed_paths = data['changed_paths']
+ changes.deleted_paths = data['deleted_paths']
+ changes.regular_command_targets = data['regular_command_targets']
+ changes.focused_command_targets = data['focused_command_targets']
+ changes.no_integration_paths = data['no_integration_paths']
+
+ return changes
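+
+
+# A round-trip sketch (illustrative only); `diff_lines` stands in for the output of a
+# `git diff`, split into lines:
+#
+#   metadata = Metadata()
+#   metadata.populate_changes(diff_lines)
+#   metadata.change_description = ChangeDescription()  # required before to_dict/to_file
+#   metadata.to_file('/tmp/metadata.json')
+#   restored = Metadata.from_file('/tmp/metadata.json')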
diff --git a/test/lib/ansible_test/_internal/payload.py b/test/lib/ansible_test/_internal/payload.py
new file mode 100644
index 0000000..94150cb
--- /dev/null
+++ b/test/lib/ansible_test/_internal/payload.py
@@ -0,0 +1,132 @@
+"""Payload management for sending Ansible files and test content to other systems (VMs, containers)."""
+from __future__ import annotations
+
+import atexit
+import os
+import stat
+import tarfile
+import tempfile
+import time
+import typing as t
+
+from .constants import (
+ ANSIBLE_BIN_SYMLINK_MAP,
+)
+
+from .config import (
+ IntegrationConfig,
+ ShellConfig,
+)
+
+from .util import (
+ display,
+ ANSIBLE_SOURCE_ROOT,
+ remove_tree,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+)
+
+from .util_common import (
+ CommonConfig,
+)
+
+# improve performance by disabling uid/gid lookups
+tarfile.pwd = None # type: ignore[attr-defined] # undocumented attribute
+tarfile.grp = None # type: ignore[attr-defined] # undocumented attribute
+
+
+def create_payload(args: CommonConfig, dst_path: str) -> None:
+ """Create a payload for delegation."""
+ if args.explain:
+ return
+
+ files = list(data_context().ansible_source)
+ filters = {}
+
+ def make_executable(tar_info: tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]:
+ """Make the given file executable."""
+ tar_info.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP
+ return tar_info
+
+ if not ANSIBLE_SOURCE_ROOT:
+ # reconstruct the bin directory which is not available when running from an ansible install
+ files.extend(create_temporary_bin_files(args))
+ filters.update(dict((os.path.join('ansible', path[3:]), make_executable) for path in ANSIBLE_BIN_SYMLINK_MAP.values() if path.startswith('../')))
+
+ if not data_context().content.is_ansible:
+ # exclude unnecessary files when not testing ansible itself
+ files = [f for f in files if
+ is_subdir(f[1], 'bin/') or
+ is_subdir(f[1], 'lib/ansible/') or
+ is_subdir(f[1], 'test/lib/ansible_test/')]
+
+ if not isinstance(args, (ShellConfig, IntegrationConfig)):
+ # exclude built-in ansible modules when they are not needed
+ files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py']
+
+ collection_layouts = data_context().create_collection_layouts()
+
+ content_files: list[tuple[str, str]] = []
+ extra_files: list[tuple[str, str]] = []
+
+ for layout in collection_layouts:
+ if layout == data_context().content:
+ # include files from the current collection (layout.collection.directory will be added later)
+ content_files.extend((os.path.join(layout.root, path), path) for path in data_context().content.all_files())
+ else:
+ # include files from each collection in the same collection root as the content being tested
+ extra_files.extend((os.path.join(layout.root, path), os.path.join(layout.collection.directory, path)) for path in layout.all_files())
+ else:
+ # when testing ansible itself the ansible source is the content
+ content_files = files
+ # there are no extra files when testing ansible itself
+ extra_files = []
+
+ for callback in data_context().payload_callbacks:
+ # execute callbacks only on the content paths
+ # this is done before placing them in the appropriate subdirectory (see below)
+ callback(content_files)
+
+ # place ansible source files under the 'ansible' directory on the delegated host
+ files = [(src, os.path.join('ansible', dst)) for src, dst in files]
+
+ if data_context().content.collection:
+ # place collection files under the 'ansible_collections/{namespace}/{collection}' directory on the delegated host
+ files.extend((src, os.path.join(data_context().content.collection.directory, dst)) for src, dst in content_files)
+ # extra files already have the correct destination path
+ files.extend(extra_files)
+
+ # maintain predictable file order
+ files = sorted(set(files))
+
+ display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1)
+
+ start = time.time()
+
+ with tarfile.open(dst_path, mode='w:gz', compresslevel=4, format=tarfile.GNU_FORMAT) as tar:
+ for src, dst in files:
+ display.info('%s -> %s' % (src, dst), verbosity=4)
+ tar.add(src, dst, filter=filters.get(dst))
+
+ duration = time.time() - start
+ payload_size_bytes = os.path.getsize(dst_path)
+
+ display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1)
+
+
+def create_temporary_bin_files(args: CommonConfig) -> tuple[tuple[str, str], ...]:
+ """Create a temporary ansible bin directory populated using the symlink map."""
+ if args.explain:
+ temp_path = '/tmp/ansible-tmp-bin'
+ else:
+ temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin')
+ atexit.register(remove_tree, temp_path)
+
+ for name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
+ path = os.path.join(temp_path, name)
+ os.symlink(dest, path)
+
+ return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in sorted(ANSIBLE_BIN_SYMLINK_MAP))
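+
+
+# Layout of the resulting payload archive, as produced by the mapping logic in create_payload above:
+#   ansible/...                                  -> ansible source (plus reconstructed bin entries)
+#   ansible_collections/{namespace}/{name}/...  -> collection content, when testing a collection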
diff --git a/test/lib/ansible_test/_internal/provider/__init__.py b/test/lib/ansible_test/_internal/provider/__init__.py
new file mode 100644
index 0000000..61d7baf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/__init__.py
@@ -0,0 +1,72 @@
+"""Provider (plugin) infrastructure for ansible-test."""
+from __future__ import annotations
+
+import abc
+import os
+import typing as t
+
+from ..util import (
+ ApplicationError,
+ get_subclasses,
+)
+
+
+def get_path_provider_classes(provider_type: t.Type[TPathProvider]) -> list[t.Type[TPathProvider]]:
+ """Return a list of path provider classes of the given type."""
+ return sorted(get_subclasses(provider_type), key=lambda subclass: (subclass.priority, subclass.__name__))
+
+
+def find_path_provider(provider_type: t.Type[TPathProvider],
+ provider_classes: list[t.Type[TPathProvider]],
+ path: str,
+ walk: bool,
+ ) -> TPathProvider:
+ """Return the first found path provider of the given type for the given path."""
+ sequences = sorted(set(pc.sequence for pc in provider_classes if pc.sequence > 0))
+
+ for sequence in sequences:
+ candidate_path = path
+ tier_classes = [pc for pc in provider_classes if pc.sequence == sequence]
+
+ while True:
+ for provider_class in tier_classes:
+ if provider_class.is_content_root(candidate_path):
+ return provider_class(candidate_path)
+
+ if not walk:
+ break
+
+ parent_path = os.path.dirname(candidate_path)
+
+ if parent_path == candidate_path:
+ break
+
+ candidate_path = parent_path
+
+ raise ProviderNotFoundForPath(provider_type, path)
+
+
+class ProviderNotFoundForPath(ApplicationError):
+ """Exception generated when a path based provider cannot be found for a given path."""
+ def __init__(self, provider_type: t.Type, path: str) -> None:
+ super().__init__('No %s found for path: %s' % (provider_type.__name__, path))
+
+ self.provider_type = provider_type
+ self.path = path
+
+
+class PathProvider(metaclass=abc.ABCMeta):
+ """Base class for provider plugins that are path based."""
+ sequence = 500
+ priority = 500
+
+ def __init__(self, root: str) -> None:
+ self.root = root
+
+ @staticmethod
+ @abc.abstractmethod
+ def is_content_root(path: str) -> bool:
+ """Return True if the given path is a content root for this provider."""
+
+
+TPathProvider = t.TypeVar('TPathProvider', bound=PathProvider)
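+
+
+# A usage sketch (illustrative only), using the LayoutProvider subclass hierarchy defined
+# under provider/layout/:
+#
+#   classes = get_path_provider_classes(LayoutProvider)
+#   provider = find_path_provider(LayoutProvider, classes, os.getcwd(), walk=True)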
diff --git a/test/lib/ansible_test/_internal/provider/layout/__init__.py b/test/lib/ansible_test/_internal/provider/layout/__init__.py
new file mode 100644
index 0000000..aa6693f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/__init__.py
@@ -0,0 +1,236 @@
+"""Code for finding content."""
+from __future__ import annotations
+
+import abc
+import collections
+import os
+import typing as t
+
+from ...util import (
+ ANSIBLE_SOURCE_ROOT,
+)
+
+from .. import (
+ PathProvider,
+)
+
+
+class Layout:
+ """Description of content locations and helper methods to access content."""
+ def __init__(self,
+ root: str,
+ paths: list[str],
+ ) -> None:
+ self.root = root
+
+ self.__paths = paths # contains both file paths and symlinked directory paths (ending with os.path.sep)
+ self.__files = [path for path in paths if not path.endswith(os.path.sep)] # contains only file paths
+ self.__paths_tree = paths_to_tree(self.__paths)
+ self.__files_tree = paths_to_tree(self.__files)
+
+ def all_files(self, include_symlinked_directories: bool = False) -> list[str]:
+ """Return a list of all file paths."""
+ if include_symlinked_directories:
+ return self.__paths
+
+ return self.__files
+
+ def walk_files(self, directory: str, include_symlinked_directories: bool = False) -> list[str]:
+ """Return a list of file paths found recursively under the given directory."""
+ if include_symlinked_directories:
+ tree = self.__paths_tree
+ else:
+ tree = self.__files_tree
+
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(tree, parts)
+
+ if not item:
+ return []
+
+ directories = collections.deque(item[0].values())
+
+ files = list(item[1])
+
+ while directories:
+ item = directories.pop()
+ directories.extend(item[0].values())
+ files.extend(item[1])
+
+ return files
+
+ def get_dirs(self, directory: str) -> list[str]:
+ """Return a list directory paths found directly under the given directory."""
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(self.__files_tree, parts)
+ return [os.path.join(directory, key) for key in item[0].keys()] if item else []
+
+ def get_files(self, directory: str) -> list[str]:
+ """Return a list of file paths found directly under the given directory."""
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(self.__files_tree, parts)
+ return item[1] if item else []
+
+
+class ContentLayout(Layout):
+ """Information about the current Ansible content being tested."""
+ def __init__(self,
+ root: str,
+ paths: list[str],
+ plugin_paths: dict[str, str],
+ collection: t.Optional[CollectionDetail],
+ test_path: str,
+ results_path: str,
+ sanity_path: str,
+ sanity_messages: t.Optional[LayoutMessages],
+ integration_path: str,
+ integration_targets_path: str,
+ integration_vars_path: str,
+ integration_messages: t.Optional[LayoutMessages],
+ unit_path: str,
+ unit_module_path: str,
+ unit_module_utils_path: str,
+ unit_messages: t.Optional[LayoutMessages],
+ unsupported: bool = False,
+ ) -> None:
+ super().__init__(root, paths)
+
+ self.plugin_paths = plugin_paths
+ self.collection = collection
+ self.test_path = test_path
+ self.results_path = results_path
+ self.sanity_path = sanity_path
+ self.sanity_messages = sanity_messages
+ self.integration_path = integration_path
+ self.integration_targets_path = integration_targets_path
+ self.integration_vars_path = integration_vars_path
+ self.integration_messages = integration_messages
+ self.unit_path = unit_path
+ self.unit_module_path = unit_module_path
+ self.unit_module_utils_path = unit_module_utils_path
+ self.unit_messages = unit_messages
+ self.unsupported = unsupported
+
+ self.is_ansible = root == ANSIBLE_SOURCE_ROOT
+
+ @property
+ def prefix(self) -> str:
+ """Return the collection prefix or an empty string if not a collection."""
+ if self.collection:
+ return self.collection.prefix
+
+ return ''
+
+ @property
+ def module_path(self) -> t.Optional[str]:
+ """Return the path where modules are found, if any."""
+ return self.plugin_paths.get('modules')
+
+ @property
+ def module_utils_path(self) -> t.Optional[str]:
+ """Return the path where module_utils are found, if any."""
+ return self.plugin_paths.get('module_utils')
+
+ @property
+ def module_utils_powershell_path(self) -> t.Optional[str]:
+ """Return the path where powershell module_utils are found, if any."""
+ if self.is_ansible:
+ return os.path.join(self.plugin_paths['module_utils'], 'powershell')
+
+ return self.plugin_paths.get('module_utils')
+
+ @property
+ def module_utils_csharp_path(self) -> t.Optional[str]:
+ """Return the path where csharp module_utils are found, if any."""
+ if self.is_ansible:
+ return os.path.join(self.plugin_paths['module_utils'], 'csharp')
+
+ return self.plugin_paths.get('module_utils')
+
+
+class LayoutMessages:
+ """Messages generated during layout creation that should be deferred for later display."""
+ def __init__(self) -> None:
+ self.info: list[str] = []
+ self.warning: list[str] = []
+ self.error: list[str] = []
+
+
+class CollectionDetail:
+ """Details about the layout of the current collection."""
+ def __init__(self,
+ name: str,
+ namespace: str,
+ root: str,
+ ) -> None:
+ self.name = name
+ self.namespace = namespace
+ self.root = root
+ self.full_name = '%s.%s' % (namespace, name)
+ self.prefix = '%s.' % self.full_name
+ self.directory = os.path.join('ansible_collections', namespace, name)
+
+
+class LayoutProvider(PathProvider):
+ """Base class for layout providers."""
+ PLUGIN_TYPES = (
+ 'action',
+ 'become',
+ 'cache',
+ 'callback',
+ 'cliconf',
+ 'connection',
+ 'doc_fragments',
+ 'filter',
+ 'httpapi',
+ 'inventory',
+ 'lookup',
+ 'module_utils',
+ 'modules',
+ 'netconf',
+ 'shell',
+ 'strategy',
+ 'terminal',
+ 'test',
+ 'vars',
+ # The following are plugin directories not directly supported by ansible-core, but used in collections
+ # (https://github.com/ansible-collections/overview/blob/main/collection_requirements.rst#modules--plugins)
+ 'plugin_utils',
+ 'sub_plugins',
+ )
+
+ @abc.abstractmethod
+ def create(self, root: str, paths: list[str]) -> ContentLayout:
+ """Create a layout using the given root and paths."""
+
+
+def paths_to_tree(paths: list[str]) -> tuple[dict[str, t.Any], list[str]]:
+ """Return a filesystem tree from the given list of paths."""
+ tree: tuple[dict[str, t.Any], list[str]] = {}, []
+
+ for path in paths:
+ parts = path.split(os.path.sep)
+ root = tree
+
+ for part in parts[:-1]:
+ if part not in root[0]:
+ root[0][part] = {}, []
+
+ root = root[0][part]
+
+ root[1].append(path)
+
+ return tree
+
+
+def get_tree_item(tree: tuple[dict[str, t.Any], list[str]], parts: list[str]) -> t.Optional[tuple[dict[str, t.Any], list[str]]]:
+ """Return the portion of the tree found under the path given by parts, or None if it does not exist."""
+ root = tree
+
+ for part in parts:
+ root = root[0].get(part)
+
+ if not root:
+ return None
+
+ return root
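+
+
+# A small sketch of the tree structure (illustrative only):
+#
+#   tree = paths_to_tree(['a/b.txt', 'a/c/d.txt'])
+#   # tree == ({'a': ({'c': ({}, ['a/c/d.txt'])}, ['a/b.txt'])}, [])
+#   get_tree_item(tree, ['a', 'c'])  # -> ({}, ['a/c/d.txt'])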
diff --git a/test/lib/ansible_test/_internal/provider/layout/ansible.py b/test/lib/ansible_test/_internal/provider/layout/ansible.py
new file mode 100644
index 0000000..e8d0191
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/ansible.py
@@ -0,0 +1,44 @@
+"""Layout provider for Ansible source."""
+from __future__ import annotations
+
+import os
+
+from . import (
+ ContentLayout,
+ LayoutProvider,
+)
+
+
+class AnsibleLayout(LayoutProvider):
+ """Layout provider for Ansible source."""
+ @staticmethod
+ def is_content_root(path: str) -> bool:
+ """Return True if the given path is a content root for this provider."""
+ return os.path.exists(os.path.join(path, 'setup.py')) and os.path.exists(os.path.join(path, 'bin/ansible-test'))
+
+ def create(self, root: str, paths: list[str]) -> ContentLayout:
+ """Create a Layout using the given root and paths."""
+ plugin_paths = dict((p, os.path.join('lib/ansible/plugins', p)) for p in self.PLUGIN_TYPES)
+
+ plugin_paths.update(dict(
+ modules='lib/ansible/modules',
+ module_utils='lib/ansible/module_utils',
+ ))
+
+ return ContentLayout(root,
+ paths,
+ plugin_paths=plugin_paths,
+ collection=None,
+ test_path='test',
+ results_path='test/results',
+ sanity_path='test/sanity',
+ sanity_messages=None,
+ integration_path='test/integration',
+ integration_targets_path='test/integration/targets',
+ integration_vars_path='test/integration/integration_config.yml',
+ integration_messages=None,
+ unit_path='test/units',
+ unit_module_path='test/units/modules',
+ unit_module_utils_path='test/units/module_utils',
+ unit_messages=None,
+ )
diff --git a/test/lib/ansible_test/_internal/provider/layout/collection.py b/test/lib/ansible_test/_internal/provider/layout/collection.py
new file mode 100644
index 0000000..299d0bc
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/collection.py
@@ -0,0 +1,126 @@
+"""Layout provider for Ansible collections."""
+from __future__ import annotations
+
+import os
+
+from . import (
+ ContentLayout,
+ LayoutProvider,
+ CollectionDetail,
+ LayoutMessages,
+)
+
+from ...util import (
+ is_valid_identifier,
+)
+
+
+class CollectionLayout(LayoutProvider):
+ """Layout provider for Ansible collections."""
+ @staticmethod
+ def is_content_root(path: str) -> bool:
+ """Return True if the given path is a content root for this provider."""
+ if os.path.basename(os.path.dirname(os.path.dirname(path))) == 'ansible_collections':
+ return True
+
+ return False
+
+ def create(self, root: str, paths: list[str]) -> ContentLayout:
+ """Create a Layout using the given root and paths."""
+ plugin_paths = dict((p, os.path.join('plugins', p)) for p in self.PLUGIN_TYPES)
+
+ collection_root = os.path.dirname(os.path.dirname(root))
+ collection_dir = os.path.relpath(root, collection_root)
+
+ collection_namespace: str
+ collection_name: str
+
+ collection_namespace, collection_name = collection_dir.split(os.path.sep)
+
+ collection_root = os.path.dirname(collection_root)
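+ # Illustrative example: for root='/work/ansible_collections/my_ns/my_col' the values are
+ # collection_dir='my_ns/my_col', namespace='my_ns', name='my_col' and, after the final
+ # dirname above, collection_root='/work'.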
+
+ sanity_messages = LayoutMessages()
+ integration_messages = LayoutMessages()
+ unit_messages = LayoutMessages()
+
+ # these apply to all test commands
+ self.__check_test_path(paths, sanity_messages)
+ self.__check_test_path(paths, integration_messages)
+ self.__check_test_path(paths, unit_messages)
+
+ # these apply to specific test commands
+ integration_targets_path = self.__check_integration_path(paths, integration_messages)
+ self.__check_unit_path(paths, unit_messages)
+
+ return ContentLayout(root,
+ paths,
+ plugin_paths=plugin_paths,
+ collection=CollectionDetail(
+ name=collection_name,
+ namespace=collection_namespace,
+ root=collection_root,
+ ),
+ test_path='tests',
+ results_path='tests/output',
+ sanity_path='tests/sanity',
+ sanity_messages=sanity_messages,
+ integration_path='tests/integration',
+ integration_targets_path=integration_targets_path.rstrip(os.path.sep),
+ integration_vars_path='tests/integration/integration_config.yml',
+ integration_messages=integration_messages,
+ unit_path='tests/unit',
+ unit_module_path='tests/unit/plugins/modules',
+ unit_module_utils_path='tests/unit/plugins/module_utils',
+ unit_messages=unit_messages,
+ unsupported=not (is_valid_identifier(collection_namespace) and is_valid_identifier(collection_name)),
+ )
+
+ @staticmethod
+ def __check_test_path(paths: list[str], messages: LayoutMessages) -> None:
+ modern_test_path = 'tests/'
+ modern_test_path_found = any(path.startswith(modern_test_path) for path in paths)
+ legacy_test_path = 'test/'
+ legacy_test_path_found = any(path.startswith(legacy_test_path) for path in paths)
+
+ if modern_test_path_found and legacy_test_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_test_path, modern_test_path))
+ elif legacy_test_path_found:
+ messages.warning.append('Ignoring tests in "%s" that should be in "%s".' % (legacy_test_path, modern_test_path))
+
+ @staticmethod
+ def __check_integration_path(paths: list[str], messages: LayoutMessages) -> str:
+ modern_integration_path = 'tests/integration/targets/'
+ modern_integration_path_found = any(path.startswith(modern_integration_path) for path in paths)
+ legacy_integration_path = 'roles/test/'
+ legacy_integration_path_found = any(path.startswith(legacy_integration_path) for path in paths)
+
+ if modern_integration_path_found and legacy_integration_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_integration_path, modern_integration_path))
+ integration_targets_path = modern_integration_path
+ elif legacy_integration_path_found:
+ messages.info.append('Falling back to tests in "%s" because "%s" was not found.' % (legacy_integration_path, modern_integration_path))
+ integration_targets_path = legacy_integration_path
+ elif modern_integration_path_found:
+ messages.info.append('Loading tests from "%s".' % modern_integration_path)
+ integration_targets_path = modern_integration_path
+ else:
+ messages.error.append('Cannot run integration tests without "%s" or "%s".' % (modern_integration_path, legacy_integration_path))
+ integration_targets_path = modern_integration_path
+
+ return integration_targets_path
+
+ @staticmethod
+ def __check_unit_path(paths: list[str], messages: LayoutMessages) -> None:
+ modern_unit_path = 'tests/unit/'
+ modern_unit_path_found = any(path.startswith(modern_unit_path) for path in paths)
+ legacy_unit_path = 'tests/units/' # test/units/ will be covered by the warnings for test/ vs tests/
+ legacy_unit_path_found = any(path.startswith(legacy_unit_path) for path in paths)
+
+ if modern_unit_path_found and legacy_unit_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_unit_path, modern_unit_path))
+ elif legacy_unit_path_found:
+ messages.warning.append('Rename "%s" to "%s" to run unit tests.' % (legacy_unit_path, modern_unit_path))
+ elif modern_unit_path_found:
+ pass # unit tests only run from one directory so no message is needed
+ else:
+ messages.error.append('Cannot run unit tests without "%s".' % modern_unit_path)
diff --git a/test/lib/ansible_test/_internal/provider/layout/unsupported.py b/test/lib/ansible_test/_internal/provider/layout/unsupported.py
new file mode 100644
index 0000000..16aa254
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/unsupported.py
@@ -0,0 +1,40 @@
+"""Layout provider for an unsupported directory layout."""
+from __future__ import annotations
+
+from . import (
+ ContentLayout,
+ LayoutProvider,
+)
+
+
+class UnsupportedLayout(LayoutProvider):
+ """Layout provider for an unsupported directory layout."""
+ sequence = 0 # disable automatic detection
+
+ @staticmethod
+ def is_content_root(path: str) -> bool:
+ """Return True if the given path is a content root for this provider."""
+ return False
+
+ def create(self, root: str, paths: list[str]) -> ContentLayout:
+ """Create a Layout using the given root and paths."""
+ plugin_paths = dict((p, p) for p in self.PLUGIN_TYPES)
+
+ return ContentLayout(root,
+ paths,
+ plugin_paths=plugin_paths,
+ collection=None,
+ test_path='',
+ results_path='',
+ sanity_path='',
+ sanity_messages=None,
+ integration_path='',
+ integration_targets_path='',
+ integration_vars_path='',
+ integration_messages=None,
+ unit_path='',
+ unit_module_path='',
+ unit_module_utils_path='',
+ unit_messages=None,
+ unsupported=True,
+ )
diff --git a/test/lib/ansible_test/_internal/provider/source/__init__.py b/test/lib/ansible_test/_internal/provider/source/__init__.py
new file mode 100644
index 0000000..aa8ca47
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/__init__.py
@@ -0,0 +1,15 @@
+"""Common code for source providers."""
+from __future__ import annotations
+
+import abc
+
+from .. import (
+ PathProvider,
+)
+
+
+class SourceProvider(PathProvider):
+ """Base class for source providers."""
+ @abc.abstractmethod
+ def get_paths(self, path: str) -> list[str]:
+ """Return the list of available content paths under the given path."""
diff --git a/test/lib/ansible_test/_internal/provider/source/git.py b/test/lib/ansible_test/_internal/provider/source/git.py
new file mode 100644
index 0000000..37f16bf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/git.py
@@ -0,0 +1,69 @@
+"""Source provider for a content root managed by git version control."""
+from __future__ import annotations
+
+import os
+
+from ...git import (
+ Git,
+)
+
+from ...encoding import (
+ to_bytes,
+)
+
+from ...util import (
+ SubprocessError,
+)
+
+from . import (
+ SourceProvider,
+)
+
+
+class GitSource(SourceProvider):
+ """Source provider for a content root managed by git version control."""
+ @staticmethod
+ def is_content_root(path: str) -> bool:
+ """Return True if the given path is a content root for this provider."""
+ return os.path.exists(os.path.join(path, '.git'))
+
+ def get_paths(self, path: str) -> list[str]:
+ """Return the list of available content paths under the given path."""
+ paths = self.__get_paths(path)
+
+ try:
+ submodule_paths = Git(path).get_submodule_paths()
+ except SubprocessError:
+ if path == self.root:
+ raise
+
+ # older versions of git require submodule commands to be executed from the top level of the working tree
+ # git version 2.18.1 (centos8) does not have this restriction
+ # git version 1.8.3.1 (centos7) does
+ # fall back to using the top level directory of the working tree only when needed
+ # this avoids penalizing newer git versions with a potentially slower analysis due to additional submodules
+ rel_path = os.path.relpath(path, self.root) + os.path.sep
+
+ submodule_paths = Git(self.root).get_submodule_paths()
+ submodule_paths = [os.path.relpath(p, rel_path) for p in submodule_paths if p.startswith(rel_path)]
+
+ for submodule_path in submodule_paths:
+ paths.extend(os.path.join(submodule_path, p) for p in self.__get_paths(os.path.join(path, submodule_path)))
+
+ # git reports submodule directories as regular files
+ paths = [p for p in paths if p not in submodule_paths]
+
+ return paths
+
+ @staticmethod
+ def __get_paths(path: str) -> list[str]:
+ """Return the list of available content paths under the given path."""
+ git = Git(path)
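+ # list tracked (--cached) and untracked (--others) files while honoring the standard
+ # ignore rules (--exclude-standard); this assumes get_file_names wraps `git ls-files`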
+ paths = git.get_file_names(['--cached', '--others', '--exclude-standard'])
+ deleted_paths = git.get_file_names(['--deleted'])
+ paths = sorted(set(paths) - set(deleted_paths))
+
+ # directory symlinks are reported by git as regular files but they need to be treated as directories
+ paths = [path + os.path.sep if os.path.isdir(to_bytes(path)) else path for path in paths]
+
+ return paths
diff --git a/test/lib/ansible_test/_internal/provider/source/installed.py b/test/lib/ansible_test/_internal/provider/source/installed.py
new file mode 100644
index 0000000..6b82188
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/installed.py
@@ -0,0 +1,40 @@
+"""Source provider for content which has been installed."""
+from __future__ import annotations
+
+import os
+
+from . import (
+ SourceProvider,
+)
+
+
+class InstalledSource(SourceProvider):
+ """Source provider for content which has been installed."""
+ sequence = 0 # disable automatic detection
+
+ @staticmethod
+ def is_content_root(path: str) -> bool:
+ """Return True if the given path is a content root for this provider."""
+ return False
+
+ def get_paths(self, path: str) -> list[str]:
+ """Return the list of available content paths under the given path."""
+ paths = []
+
+ kill_extensions = (
+ '.pyc',
+ '.pyo',
+ )
+
+ for root, _dummy, file_names in os.walk(path):
+ rel_root = os.path.relpath(root, path)
+
+ if rel_root == '.':
+ rel_root = ''
+
+ paths.extend([os.path.join(rel_root, file_name) for file_name in file_names
+ if os.path.splitext(file_name)[1] not in kill_extensions])
+
+ # NOTE: directory symlinks are ignored as there should be no directory symlinks for an install
+
+ return paths
diff --git a/test/lib/ansible_test/_internal/provider/source/unsupported.py b/test/lib/ansible_test/_internal/provider/source/unsupported.py
new file mode 100644
index 0000000..e2f8953
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/unsupported.py
@@ -0,0 +1,20 @@
+"""Source provider to use when the layout is unsupported."""
+from __future__ import annotations
+
+from . import (
+ SourceProvider,
+)
+
+
+class UnsupportedSource(SourceProvider):
+ """Source provider to use when the layout is unsupported."""
+ sequence = 0 # disable automatic detection
+
+ @staticmethod
+ def is_content_root(path: str) -> bool:
+ """Return True if the given path is a content root for this provider."""
+ return False
+
+ def get_paths(self, path: str) -> list[str]:
+ """Return the list of available content paths under the given path."""
+ return []
diff --git a/test/lib/ansible_test/_internal/provider/source/unversioned.py b/test/lib/ansible_test/_internal/provider/source/unversioned.py
new file mode 100644
index 0000000..d8eff5d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/unversioned.py
@@ -0,0 +1,85 @@
+"""Fallback source provider when no other provider matches the content root."""
+from __future__ import annotations
+
+import os
+
+from ...constants import (
+ TIMEOUT_PATH,
+)
+
+from ...encoding import (
+ to_bytes,
+)
+
+from . import (
+ SourceProvider,
+)
+
+
+class UnversionedSource(SourceProvider):
+ """Fallback source provider when no other provider matches the content root."""
+ sequence = 0 # disable automatic detection
+
+ @staticmethod
+ def is_content_root(path: str) -> bool:
+ """Return True if the given path is a content root for this provider."""
+ return False
+
+ def get_paths(self, path: str) -> list[str]:
+ """Return the list of available content paths under the given path."""
+ paths = []
+
+ kill_any_dir = (
+ '.idea',
+ '.pytest_cache',
+ '__pycache__',
+ 'ansible.egg-info',
+ 'ansible_base.egg-info',
+ 'ansible_core.egg-info',
+ )
+
+ kill_sub_dir = {
+ 'test': (
+ 'results',
+ 'cache',
+ 'output',
+ ),
+ 'tests': (
+ 'output',
+ ),
+ 'docs/docsite': (
+ '_build',
+ ),
+ }
+
+ kill_sub_file = {
+ '': (
+ TIMEOUT_PATH,
+ ),
+ }
+
+ kill_extensions = (
+ '.pyc',
+ '.pyo',
+ '.retry',
+ )
+
+ for root, dir_names, file_names in os.walk(path):
+ rel_root = os.path.relpath(root, path)
+
+ if rel_root == '.':
+ rel_root = ''
+
+ for kill in kill_any_dir + kill_sub_dir.get(rel_root, ()):
+ if kill in dir_names:
+ dir_names.remove(kill)
+
+ kill_files = kill_sub_file.get(rel_root, ())
+
+ paths.extend([os.path.join(rel_root, file_name) for file_name in file_names
+ if not os.path.splitext(file_name)[1] in kill_extensions and file_name not in kill_files])
+
+ # include directory symlinks since they will not be traversed and would otherwise go undetected
+ paths.extend([os.path.join(rel_root, dir_name) + os.path.sep for dir_name in dir_names if os.path.islink(to_bytes(dir_name))])
+
+ return paths
diff --git a/test/lib/ansible_test/_internal/provisioning.py b/test/lib/ansible_test/_internal/provisioning.py
new file mode 100644
index 0000000..7547a30
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provisioning.py
@@ -0,0 +1,214 @@
+"""Provision hosts for running tests."""
+from __future__ import annotations
+
+import atexit
+import collections.abc as c
+import dataclasses
+import functools
+import itertools
+import os
+import pickle
+import sys
+import time
+import traceback
+import typing as t
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .util import (
+ ApplicationError,
+ HostConnectionError,
+ display,
+ open_binary_file,
+ verify_sys_executable,
+ version_to_str,
+ type_guard,
+)
+
+from .thread import (
+ WrappedThread,
+)
+
+from .host_profiles import (
+ ControllerHostProfile,
+ DockerProfile,
+ HostProfile,
+ SshConnection,
+ SshTargetHostProfile,
+ create_host_profile,
+)
+
+from .pypi_proxy import (
+ run_pypi_proxy,
+)
+
+THostProfile = t.TypeVar('THostProfile', bound=HostProfile)
+TEnvironmentConfig = t.TypeVar('TEnvironmentConfig', bound=EnvironmentConfig)
+
+
+class PrimeContainers(ApplicationError):
+ """Exception raised to end execution early after priming containers."""
+
+
+@dataclasses.dataclass(frozen=True)
+class HostState:
+ """State of hosts and profiles to be passed to ansible-test during delegation."""
+ controller_profile: ControllerHostProfile
+ target_profiles: list[HostProfile]
+
+ @property
+ def profiles(self) -> list[HostProfile]:
+ """Return all the profiles as a list."""
+ return [t.cast(HostProfile, self.controller_profile)] + self.target_profiles
+
+ def serialize(self, path: str) -> None:
+ """Serialize the host state to the given path."""
+ with open_binary_file(path, 'wb') as state_file:
+ pickle.dump(self, state_file)
+
+ @staticmethod
+ def deserialize(args: EnvironmentConfig, path: str) -> HostState:
+ """Deserialize host state from the given args and path."""
+ with open_binary_file(path) as state_file:
+ host_state: HostState = pickle.load(state_file)
+
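+ # reattach the args from this invocation to the deserialized profiles, replacing
+ # whatever state was captured when the profiles were serialized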
+ host_state.controller_profile.args = args
+
+ for target in host_state.target_profiles:
+ target.args = args
+
+ return host_state
+
+ def get_controller_target_connections(self) -> list[SshConnection]:
+ """Return SSH connection(s) for accessing all target hosts from the controller."""
+ return list(itertools.chain.from_iterable([target.get_controller_target_connections() for
+ target in self.target_profiles if isinstance(target, SshTargetHostProfile)]))
+
+ def targets(self, profile_type: t.Type[THostProfile]) -> list[THostProfile]:
+ """The list of target(s), verified to be of the specified type."""
+ if not self.target_profiles:
+ raise Exception('No target profiles found.')
+
+ assert type_guard(self.target_profiles, profile_type)
+
+ return t.cast(list[THostProfile], self.target_profiles)
+
+
+def prepare_profiles(
+ args: TEnvironmentConfig,
+ targets_use_pypi: bool = False,
+ skip_setup: bool = False,
+ requirements: t.Optional[c.Callable[[HostProfile], None]] = None,
+) -> HostState:
+ """
+ Create new profiles, or load existing ones, and return them.
+ If a requirements callback was provided, it will be used before configuring hosts when delegation has already been performed.
+ """
+ if args.host_path:
+ host_state = HostState.deserialize(args, os.path.join(args.host_path, 'state.dat'))
+ else:
+ run_pypi_proxy(args, targets_use_pypi)
+
+ host_state = HostState(
+ controller_profile=t.cast(ControllerHostProfile, create_host_profile(args, args.controller, True)),
+ target_profiles=[create_host_profile(args, target, False) for target in args.targets],
+ )
+
+ if args.prime_containers:
+ for host_profile in host_state.profiles:
+ if isinstance(host_profile, DockerProfile):
+ host_profile.provision()
+
+ raise PrimeContainers()
+
+ atexit.register(functools.partial(cleanup_profiles, host_state))
+
+ def provision(profile: HostProfile) -> None:
+ """Provision the given profile."""
+ profile.provision()
+
+ if not skip_setup:
+ profile.setup()
+
+ dispatch_jobs([(profile, WrappedThread(functools.partial(provision, profile))) for profile in host_state.profiles])
+
+ host_state.controller_profile.configure()
+
+ if not args.delegate:
+ check_controller_python(args, host_state)
+
+ if requirements:
+ requirements(host_state.controller_profile)
+
+ def configure(profile: HostProfile) -> None:
+ """Configure the given profile."""
+ profile.wait()
+
+ if not skip_setup:
+ profile.configure()
+
+ if requirements:
+ requirements(profile)
+
+ dispatch_jobs([(profile, WrappedThread(functools.partial(configure, profile))) for profile in host_state.target_profiles])
+
+ return host_state
+
+
+def check_controller_python(args: EnvironmentConfig, host_state: HostState) -> None:
+ """Check the running environment to make sure it is what we expected."""
+ sys_version = version_to_str(sys.version_info[:2])
+ controller_python = host_state.controller_profile.python
+
+ if expected_executable := verify_sys_executable(controller_python.path):
+ raise ApplicationError(f'Running under Python interpreter "{sys.executable}" instead of "{expected_executable}".')
+
+ expected_version = controller_python.version
+
+ if expected_version != sys_version:
+ raise ApplicationError(f'Running under Python version {sys_version} instead of {expected_version}.')
+
+ args.controller_python = controller_python
+
+
+def cleanup_profiles(host_state: HostState) -> None:
+ """Cleanup provisioned hosts when exiting."""
+ for profile in host_state.profiles:
+ profile.deprovision()
+
+
+def dispatch_jobs(jobs: list[tuple[HostProfile, WrappedThread]]) -> None:
+ """Run the given profile job threads and wait for them to complete."""
+ for profile, thread in jobs:
+ thread.daemon = True
+ thread.start()
+
+ while any(thread.is_alive() for profile, thread in jobs):
+ time.sleep(1)
+
+ failed = False
+ connection_failures = 0
+
+ for profile, thread in jobs:
+ try:
+ thread.wait_for_result()
+ except HostConnectionError as ex:
+ display.error(f'Host {profile.config} connection failed:\n{ex}')
+ failed = True
+ connection_failures += 1
+ except ApplicationError as ex:
+ display.error(f'Host {profile.config} job failed:\n{ex}')
+ failed = True
+ except Exception as ex: # pylint: disable=broad-except
+ name = f'{"" if ex.__class__.__module__ == "builtins" else ex.__class__.__module__ + "."}{ex.__class__.__qualname__}'
+ display.error(f'Host {profile.config} job failed:\nTraceback (most recent call last):\n'
+ f'{"".join(traceback.format_tb(ex.__traceback__)).rstrip()}\n{name}: {ex}')
+ failed = True
+
+ if connection_failures:
+ raise HostConnectionError(f'Host job(s) failed, including {connection_failures} connection failure(s). See previous error(s) for details.')
+
+ if failed:
+ raise ApplicationError('Host job(s) failed. See previous error(s) for details.')
diff --git a/test/lib/ansible_test/_internal/pypi_proxy.py b/test/lib/ansible_test/_internal/pypi_proxy.py
new file mode 100644
index 0000000..97663ea
--- /dev/null
+++ b/test/lib/ansible_test/_internal/pypi_proxy.py
@@ -0,0 +1,180 @@
+"""PyPI proxy management."""
+from __future__ import annotations
+
+import atexit
+import os
+import urllib.parse
+
+from .io import (
+ write_text_file,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .host_configs import (
+ PosixConfig,
+)
+
+from .util import (
+ ApplicationError,
+ display,
+)
+
+from .util_common import (
+ process_scoped_temporary_file,
+)
+
+from .docker_util import (
+ docker_available,
+)
+
+from .containers import (
+ HostType,
+ get_container_database,
+ run_support_container,
+)
+
+from .ansible_util import (
+ run_playbook,
+)
+
+from .host_profiles import (
+ HostProfile,
+)
+
+from .inventory import (
+ create_posix_inventory,
+)
+
+
+def run_pypi_proxy(args: EnvironmentConfig, targets_use_pypi: bool) -> None:
+ """Run a PyPI proxy support container."""
+ if args.pypi_endpoint:
+ return # the user has overridden the proxy endpoint; there is nothing to provision
+
+ versions_needing_proxy: tuple[str, ...] = tuple() # preserved for future use; no versions currently require this
+ posix_targets = [target for target in args.targets if isinstance(target, PosixConfig)]
+ need_proxy = targets_use_pypi and any(target.python.version in versions_needing_proxy for target in posix_targets)
+ use_proxy = args.pypi_proxy or need_proxy
+
+ if not use_proxy:
+ return
+
+ if not docker_available():
+ if args.pypi_proxy:
+ raise ApplicationError('Use of the PyPI proxy was requested, but Docker is not available.')
+
+ display.warning('Unable to use the PyPI proxy because Docker is not available. Installation of packages using `pip` may fail.')
+ return
+
+ image = 'quay.io/ansible/pypi-test-container:2.0.0'
+ port = 3141
+
+ run_support_container(
+ args=args,
+ context='__pypi_proxy__',
+ image=image,
+ name=f'pypi-test-container-{args.session_name}',
+ ports=[port],
+ )
+
+
+def configure_pypi_proxy(args: EnvironmentConfig, profile: HostProfile) -> None:
+ """Configure the environment to use a PyPI proxy, if present."""
+ if args.pypi_endpoint:
+ pypi_endpoint = args.pypi_endpoint
+ else:
+ containers = get_container_database(args)
+ context = containers.data.get(HostType.control if profile.controller else HostType.managed, {}).get('__pypi_proxy__')
+
+ if not context:
+ return # proxy not configured
+
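+ # take the first access entry for the proxy container (assumed to be the only one)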
+ access = list(context.values())[0]
+
+ host = access.host_ip
+ port = dict(access.port_map())[3141]
+
+ pypi_endpoint = f'http://{host}:{port}/root/pypi/+simple/'
+
+ pypi_hostname = urllib.parse.urlparse(pypi_endpoint)[1].split(':')[0]
+
+ if profile.controller:
+ configure_controller_pypi_proxy(args, profile, pypi_endpoint, pypi_hostname)
+ else:
+ configure_target_pypi_proxy(args, profile, pypi_endpoint, pypi_hostname)
+
+
+def configure_controller_pypi_proxy(args: EnvironmentConfig, profile: HostProfile, pypi_endpoint: str, pypi_hostname: str) -> None:
+ """Configure the controller environment to use a PyPI proxy."""
+ configure_pypi_proxy_pip(args, profile, pypi_endpoint, pypi_hostname)
+ configure_pypi_proxy_easy_install(args, profile, pypi_endpoint)
+
+
+def configure_target_pypi_proxy(args: EnvironmentConfig, profile: HostProfile, pypi_endpoint: str, pypi_hostname: str) -> None:
+ """Configure the target environment to use a PyPI proxy."""
+ inventory_path = process_scoped_temporary_file(args)
+
+ create_posix_inventory(args, inventory_path, [profile])
+
+ def cleanup_pypi_proxy() -> None:
+ """Undo changes made to configure the PyPI proxy."""
+ run_playbook(args, inventory_path, 'pypi_proxy_restore.yml', capture=True)
+
+ force = 'yes' if profile.config.is_managed else 'no'
+
+ run_playbook(args, inventory_path, 'pypi_proxy_prepare.yml', capture=True, variables=dict(
+ pypi_endpoint=pypi_endpoint, pypi_hostname=pypi_hostname, force=force))
+
+ atexit.register(cleanup_pypi_proxy)
+
+
+def configure_pypi_proxy_pip(args: EnvironmentConfig, profile: HostProfile, pypi_endpoint: str, pypi_hostname: str) -> None:
+ """Configure a custom index for pip based installs."""
+ pip_conf_path = os.path.expanduser('~/.pip/pip.conf')
+ pip_conf = '''
+[global]
+index-url = {0}
+trusted-host = {1}
+'''.format(pypi_endpoint, pypi_hostname).strip()
+
+ def pip_conf_cleanup() -> None:
+ """Remove custom pip PyPI config."""
+ display.info('Removing custom PyPI config: %s' % pip_conf_path, verbosity=1)
+ os.remove(pip_conf_path)
+
+ if os.path.exists(pip_conf_path) and not profile.config.is_managed:
+ raise ApplicationError('Refusing to overwrite existing file: %s' % pip_conf_path)
+
+ display.info('Injecting custom PyPI config: %s' % pip_conf_path, verbosity=1)
+ display.info('Config: %s\n%s' % (pip_conf_path, pip_conf), verbosity=3)
+
+ if not args.explain:
+ write_text_file(pip_conf_path, pip_conf, True)
+ atexit.register(pip_conf_cleanup)
+
+
+def configure_pypi_proxy_easy_install(args: EnvironmentConfig, profile: HostProfile, pypi_endpoint: str) -> None:
+ """Configure a custom index for easy_install based installs."""
+ pydistutils_cfg_path = os.path.expanduser('~/.pydistutils.cfg')
+ pydistutils_cfg = '''
+[easy_install]
+index_url = {0}
+'''.format(pypi_endpoint).strip()
+
+ if os.path.exists(pydistutils_cfg_path) and not profile.config.is_managed:
+ raise ApplicationError('Refusing to overwrite existing file: %s' % pydistutils_cfg_path)
+
+ def pydistutils_cfg_cleanup() -> None:
+ """Remove custom PyPI config."""
+ display.info('Removing custom PyPI config: %s' % pydistutils_cfg_path, verbosity=1)
+ os.remove(pydistutils_cfg_path)
+
+ display.info('Injecting custom PyPI config: %s' % pydistutils_cfg_path, verbosity=1)
+ display.info('Config: %s\n%s' % (pydistutils_cfg_path, pydistutils_cfg), verbosity=3)
+
+ if not args.explain:
+ write_text_file(pydistutils_cfg_path, pydistutils_cfg, True)
+ atexit.register(pydistutils_cfg_cleanup)
diff --git a/test/lib/ansible_test/_internal/python_requirements.py b/test/lib/ansible_test/_internal/python_requirements.py
new file mode 100644
index 0000000..e3733a5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/python_requirements.py
@@ -0,0 +1,570 @@
+"""Python requirements management"""
+from __future__ import annotations
+
+import base64
+import dataclasses
+import json
+import os
+import re
+import typing as t
+
+from .encoding import (
+ to_text,
+ to_bytes,
+)
+
+from .io import (
+ read_text_file,
+)
+
+from .util import (
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_TEST_TARGET_ROOT,
+ ANSIBLE_TEST_TOOLS_ROOT,
+ ApplicationError,
+ SubprocessError,
+ display,
+ find_executable,
+ raw_command,
+ str_to_version,
+ version_to_str,
+)
+
+from .util_common import (
+ check_pyyaml,
+ create_result_directories,
+)
+
+from .config import (
+ EnvironmentConfig,
+ IntegrationConfig,
+ UnitsConfig,
+)
+
+from .data import (
+ data_context,
+)
+
+from .host_configs import (
+ PosixConfig,
+ PythonConfig,
+)
+
+from .connections import (
+ LocalConnection,
+ Connection,
+)
+
+from .coverage_util import (
+ get_coverage_version,
+)
+
+QUIET_PIP_SCRIPT_PATH = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'quiet_pip.py')
+REQUIREMENTS_SCRIPT_PATH = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'requirements.py')
+
+# IMPORTANT: Keep this in sync with the ansible-test.txt requirements file.
+VIRTUALENV_VERSION = '16.7.12'
+
+# Pip Abstraction
+
+
+class PipUnavailableError(ApplicationError):
+ """Exception raised when pip is not available."""
+ def __init__(self, python: PythonConfig) -> None:
+ super().__init__(f'Python {python.version} at "{python.path}" does not have pip available.')
+
+
+@dataclasses.dataclass(frozen=True)
+class PipCommand:
+ """Base class for pip commands."""""
+
+ def serialize(self) -> tuple[str, dict[str, t.Any]]:
+ """Return a serialized representation of this command."""
+ name = type(self).__name__[3:].lower()
+ return name, self.__dict__
+
+
+@dataclasses.dataclass(frozen=True)
+class PipInstall(PipCommand):
+ """Details required to perform a pip install."""
+ requirements: list[tuple[str, str]]
+ constraints: list[tuple[str, str]]
+ packages: list[str]
+
+ def has_package(self, name: str) -> bool:
+ """Return True if the specified package will be installed, otherwise False."""
+ name = name.lower()
+
+ return (any(name in package.lower() for package in self.packages) or
+ any(name in contents.lower() for path, contents in self.requirements))
+
+
+@dataclasses.dataclass(frozen=True)
+class PipUninstall(PipCommand):
+ """Details required to perform a pip uninstall."""
+ packages: list[str]
+ ignore_errors: bool
+
+
+@dataclasses.dataclass(frozen=True)
+class PipVersion(PipCommand):
+ """Details required to get the pip version."""
+
+
+@dataclasses.dataclass(frozen=True)
+class PipBootstrap(PipCommand):
+ """Details required to bootstrap pip."""
+ pip_version: str
+ packages: list[str]
+
+
+# Entry Points
+
+
+def install_requirements(
+ args: EnvironmentConfig,
+ python: PythonConfig,
+ ansible: bool = False,
+ command: bool = False,
+ coverage: bool = False,
+ virtualenv: bool = False,
+ controller: bool = True,
+ connection: t.Optional[Connection] = None,
+) -> None:
+ """Install requirements for the given Python using the specified arguments."""
+ create_result_directories(args)
+
+ if not requirements_allowed(args, controller):
+ return
+
+ if command and isinstance(args, (UnitsConfig, IntegrationConfig)) and args.coverage:
+ coverage = True
+
+ cryptography = False
+
+ if ansible:
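+ # cache, on the function object itself, which Python interpreters have already had
+ # the ansible requirements installed during this run, to avoid repeat installs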
+ try:
+ ansible_cache = install_requirements.ansible_cache # type: ignore[attr-defined]
+ except AttributeError:
+ ansible_cache = install_requirements.ansible_cache = {} # type: ignore[attr-defined]
+
+ ansible_installed = ansible_cache.get(python.path)
+
+ if ansible_installed:
+ ansible = False
+ else:
+ ansible_cache[python.path] = True
+
+ # Install the latest cryptography version that the current requirements can support if it is not already available.
+ # This avoids downgrading cryptography when OS packages provide a newer version than we are able to install using pip.
+ # If not installed here, later install commands may try to install a version of cryptography which cannot be installed.
+ cryptography = not is_cryptography_available(python.path)
+
+ commands = collect_requirements(
+ python=python,
+ controller=controller,
+ ansible=ansible,
+ cryptography=cryptography,
+ command=args.command if command else None,
+ coverage=coverage,
+ virtualenv=virtualenv,
+ minimize=False,
+ sanity=None,
+ )
+
+ if not commands:
+ return
+
+ run_pip(args, python, commands, connection)
+
+ # false positive: pylint: disable=no-member
+ if any(isinstance(command, PipInstall) and command.has_package('pyyaml') for command in commands):
+ check_pyyaml(python)
+
+
+def collect_bootstrap(python: PythonConfig) -> list[PipCommand]:
+ """Return the details necessary to bootstrap pip into an empty virtual environment."""
+ infrastructure_packages = get_venv_packages(python)
+ pip_version = infrastructure_packages['pip']
+ packages = [f'{name}=={version}' for name, version in infrastructure_packages.items()]
+
+ bootstrap = PipBootstrap(
+ pip_version=pip_version,
+ packages=packages,
+ )
+
+ return [bootstrap]
+
+
+def collect_requirements(
+ python: PythonConfig,
+ controller: bool,
+ ansible: bool,
+ cryptography: bool,
+ coverage: bool,
+ virtualenv: bool,
+ minimize: bool,
+ command: t.Optional[str],
+ sanity: t.Optional[str],
+) -> list[PipCommand]:
+ """Collect requirements for the given Python using the specified arguments."""
+ commands: list[PipCommand] = []
+
+ if virtualenv:
+ # sanity tests on Python 2.x install virtualenv when it is too old or is not already installed and the `--requirements` option is given
+ # the last version of virtualenv with no dependencies is used to minimize the changes made outside a virtual environment
+ commands.extend(collect_package_install(packages=[f'virtualenv=={VIRTUALENV_VERSION}'], constraints=False))
+
+ if coverage:
+ commands.extend(collect_package_install(packages=[f'coverage=={get_coverage_version(python.version).coverage_version}'], constraints=False))
+
+ if cryptography:
+ commands.extend(collect_package_install(packages=get_cryptography_requirements(python)))
+
+ if ansible or command:
+ commands.extend(collect_general_install(command, ansible))
+
+ if sanity:
+ commands.extend(collect_sanity_install(sanity))
+
+ if command == 'units':
+ commands.extend(collect_units_install())
+
+ if command in ('integration', 'windows-integration', 'network-integration'):
+ commands.extend(collect_integration_install(command, controller))
+
+ if (sanity or minimize) and any(isinstance(command, PipInstall) for command in commands):
+ # bootstrap the managed virtual environment, which will have been created without any installed packages
+ # sanity tests which install no packages skip this step
+ commands = collect_bootstrap(python) + commands
+
+ # most infrastructure packages can be removed from sanity test virtual environments after they've been created
+ # removing them reduces the size of environments cached in containers
+ uninstall_packages = list(get_venv_packages(python))
+
+ if not minimize:
+ # installed packages may have run-time dependencies on setuptools
+ uninstall_packages.remove('setuptools')
+
+ commands.extend(collect_uninstall(packages=uninstall_packages))
+
+ return commands
+
+
+def run_pip(
+ args: EnvironmentConfig,
+ python: PythonConfig,
+ commands: list[PipCommand],
+ connection: t.Optional[Connection],
+) -> None:
+ """Run the specified pip commands for the given Python, and optionally the specified host."""
+ connection = connection or LocalConnection(args)
+ script = prepare_pip_script(commands)
+
+ if not args.explain:
+ try:
+ connection.run([python.path], data=script, capture=False)
+ except SubprocessError:
+ script = prepare_pip_script([PipVersion()])
+
+ try:
+ connection.run([python.path], data=script, capture=True)
+ except SubprocessError as ex:
+ if 'pip is unavailable:' in ex.stdout + ex.stderr:
+ raise PipUnavailableError(python)
+
+ raise
+
+
+# Collect
+
+
+def collect_general_install(
+ command: t.Optional[str] = None,
+ ansible: bool = False,
+) -> list[PipInstall]:
+ """Return details necessary for the specified general-purpose pip install(s)."""
+ requirements_paths: list[tuple[str, str]] = []
+ constraints_paths: list[tuple[str, str]] = []
+
+ if ansible:
+ path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'ansible.txt')
+ requirements_paths.append((ANSIBLE_TEST_DATA_ROOT, path))
+
+ if command:
+ path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', f'{command}.txt')
+ requirements_paths.append((ANSIBLE_TEST_DATA_ROOT, path))
+
+ return collect_install(requirements_paths, constraints_paths)
+
+
+def collect_package_install(packages: list[str], constraints: bool = True) -> list[PipInstall]:
+ """Return the details necessary to install the specified packages."""
+ return collect_install([], [], packages, constraints=constraints)
+
+
+def collect_sanity_install(sanity: str) -> list[PipInstall]:
+ """Return the details necessary for the specified sanity pip install(s)."""
+ requirements_paths: list[tuple[str, str]] = []
+ constraints_paths: list[tuple[str, str]] = []
+
+ path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', f'sanity.{sanity}.txt')
+ requirements_paths.append((ANSIBLE_TEST_DATA_ROOT, path))
+
+ if data_context().content.is_ansible:
+ path = os.path.join(data_context().content.sanity_path, 'code-smell', f'{sanity}.requirements.txt')
+ requirements_paths.append((data_context().content.root, path))
+
+ return collect_install(requirements_paths, constraints_paths, constraints=False)
+
+
+def collect_units_install() -> list[PipInstall]:
+ """Return details necessary for the specified units pip install(s)."""
+ requirements_paths: list[tuple[str, str]] = []
+ constraints_paths: list[tuple[str, str]] = []
+
+ path = os.path.join(data_context().content.unit_path, 'requirements.txt')
+ requirements_paths.append((data_context().content.root, path))
+
+ path = os.path.join(data_context().content.unit_path, 'constraints.txt')
+ constraints_paths.append((data_context().content.root, path))
+
+ return collect_install(requirements_paths, constraints_paths)
+
+
+def collect_integration_install(command: str, controller: bool) -> list[PipInstall]:
+ """Return details necessary for the specified integration pip install(s)."""
+ requirements_paths: list[tuple[str, str]] = []
+ constraints_paths: list[tuple[str, str]] = []
+
+ # Support for prefixed files was added to ansible-test in ansible-core 2.12 when split controller/target testing was implemented.
+ # Previous versions of ansible-test only recognize non-prefixed files.
+ # If a prefixed file exists (even if empty), it takes precedence over the non-prefixed file.
+ prefixes = ('controller.' if controller else 'target.', '')
+
+ for prefix in prefixes:
+ path = os.path.join(data_context().content.integration_path, f'{prefix}requirements.txt')
+
+ if os.path.exists(path):
+ requirements_paths.append((data_context().content.root, path))
+ break
+
+ for prefix in prefixes:
+ path = os.path.join(data_context().content.integration_path, f'{command}.{prefix}requirements.txt')
+
+ if os.path.exists(path):
+ requirements_paths.append((data_context().content.root, path))
+ break
+
+ for prefix in prefixes:
+ path = os.path.join(data_context().content.integration_path, f'{prefix}constraints.txt')
+
+ if os.path.exists(path):
+ constraints_paths.append((data_context().content.root, path))
+ break
+
+ return collect_install(requirements_paths, constraints_paths)
+
+
+def collect_install(
+ requirements_paths: list[tuple[str, str]],
+ constraints_paths: list[tuple[str, str]],
+ packages: t.Optional[list[str]] = None,
+ constraints: bool = True,
+) -> list[PipInstall]:
+ """Build a pip install list from the given requirements, constraints and packages."""
+ # listing content constraints first gives them priority over constraints provided by ansible-test
+ constraints_paths = list(constraints_paths)
+
+ if constraints:
+ constraints_paths.append((ANSIBLE_TEST_DATA_ROOT, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'constraints.txt')))
+
+ requirements = [(os.path.relpath(path, root), read_text_file(path)) for root, path in requirements_paths if usable_pip_file(path)]
+ constraints = [(os.path.relpath(path, root), read_text_file(path)) for root, path in constraints_paths if usable_pip_file(path)]
+ packages = packages or []
+
+ if requirements or packages:
+ installs = [PipInstall(
+ requirements=requirements,
+ constraints=constraints,
+ packages=packages,
+ )]
+ else:
+ installs = []
+
+ return installs
+
+
+def collect_uninstall(packages: list[str], ignore_errors: bool = False) -> list[PipUninstall]:
+ """Return the details necessary for the specified pip uninstall."""
+ uninstall = PipUninstall(
+ packages=packages,
+ ignore_errors=ignore_errors,
+ )
+
+ return [uninstall]
+
+
+# Support
+
+
+def get_venv_packages(python: PythonConfig) -> dict[str, str]:
+ """Return a dictionary of Python packages needed for a consistent virtual environment specific to the given Python version."""
+
+ # NOTE: This same information is needed for building the base-test-container image.
+ # See: https://github.com/ansible/base-test-container/blob/main/files/installer.py
+
+ default_packages = dict(
+ pip='21.3.1',
+ setuptools='60.8.2',
+ wheel='0.37.1',
+ )
+
+ override_packages = {
+ '2.7': dict(
+ pip='20.3.4', # 21.0 requires Python 3.6+
+ setuptools='44.1.1', # 45.0.0 requires Python 3.5+
+ wheel=None,
+ ),
+ '3.5': dict(
+ pip='20.3.4', # 21.0 requires Python 3.6+
+ setuptools='50.3.2', # 51.0.0 requires Python 3.6+
+ wheel=None,
+ ),
+ '3.6': dict(
+ pip='21.3.1', # 22.0 requires Python 3.7+
+ setuptools='59.6.0', # 59.7.0 requires Python 3.7+
+ wheel=None,
+ ),
+ }
+
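+ # a None version in an override falls back to the default version for that package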
+ packages = {name: version or default_packages[name] for name, version in override_packages.get(python.version, default_packages).items()}
+
+ return packages
+
+
+def requirements_allowed(args: EnvironmentConfig, controller: bool) -> bool:
+ """
+ Return True if requirements can be installed, otherwise return False.
+
+ Requirements are only allowed if one of the following conditions is met:
+
+ The user specified --requirements manually.
+ The install will occur on the controller and the controller or controller Python is managed by ansible-test.
+ The install will occur on the target and the target or target Python is managed by ansible-test.
+ """
+ if args.requirements:
+ return True
+
+ if controller:
+ return args.controller.is_managed or args.controller.python.is_managed
+
+ target = args.only_targets(PosixConfig)[0]
+
+ return target.is_managed or target.python.is_managed
+
+
+def prepare_pip_script(commands: list[PipCommand]) -> str:
+ """Generate a Python script to perform the requested pip commands."""
+ data = [command.serialize() for command in commands]
+
+ display.info(f'>>> Requirements Commands\n{json.dumps(data, indent=4)}', verbosity=3)
+
+ args = dict(
+ script=read_text_file(QUIET_PIP_SCRIPT_PATH),
+ verbosity=display.verbosity,
+ commands=data,
+ )
+
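+ # embed the commands and the quiet_pip script in a base64-encoded JSON payload inside
+ # the requirements.py template, producing one self-contained script to feed to Python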
+ payload = to_text(base64.b64encode(to_bytes(json.dumps(args))))
+ path = REQUIREMENTS_SCRIPT_PATH
+ template = read_text_file(path)
+ script = template.format(payload=payload)
+
+ display.info(f'>>> Python Script from Template ({path})\n{script.strip()}', verbosity=4)
+
+ return script
+
+
+def usable_pip_file(path: t.Optional[str]) -> bool:
+ """Return True if the specified pip file is usable, otherwise False."""
+ return bool(path) and os.path.exists(path) and bool(os.path.getsize(path))
+
+
+# Cryptography
+
+
+def is_cryptography_available(python: str) -> bool:
+ """Return True if cryptography is available for the given python."""
+ try:
+ raw_command([python, '-c', 'import cryptography'], capture=True)
+ except SubprocessError:
+ return False
+
+ return True
+
+
+def get_cryptography_requirements(python: PythonConfig) -> list[str]:
+ """
+ Return the correct cryptography and pyopenssl requirements for the given python version.
+ The version of cryptography installed depends on the python version and openssl version.
+ """
+ openssl_version = get_openssl_version(python)
+
+ if openssl_version and openssl_version < (1, 1, 0):
+ # cryptography 3.2 requires openssl 1.1.x or later
+ # see https://cryptography.io/en/latest/changelog.html#v3-2
+ cryptography = 'cryptography < 3.2'
+ # pyopenssl 20.0.0 requires cryptography 3.2 or later
+ pyopenssl = 'pyopenssl < 20.0.0'
+ else:
+ # cryptography 3.4+ builds require a working rust toolchain
+ # systems bootstrapped using ansible-core-ci can access additional wheels through the spare-tire package index
+ cryptography = 'cryptography'
+ # any future installation of pyopenssl is free to use any compatible version of cryptography
+ pyopenssl = ''
+
+ requirements = [
+ cryptography,
+ pyopenssl,
+ ]
+
+ requirements = [requirement for requirement in requirements if requirement]
+
+ return requirements
+
+
+def get_openssl_version(python: PythonConfig) -> t.Optional[tuple[int, ...]]:
+ """Return the openssl version."""
+ if not python.version.startswith('2.'):
+ # OpenSSL version checking only works on Python 3.x.
+ # This should be the most accurate, since it is the Python we will be using.
+ version = json.loads(raw_command([python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'sslcheck.py')], capture=True)[0])['version']
+
+ if version:
+ display.info(f'Detected OpenSSL version {version_to_str(version)} under Python {python.version}.', verbosity=1)
+
+ return tuple(version)
+
+ # Fall back to detecting the OpenSSL version from the CLI.
+ # This should provide an adequate solution on Python 2.x.
+ openssl_path = find_executable('openssl', required=False)
+
+ if openssl_path:
+ try:
+ result = raw_command([openssl_path, 'version'], capture=True)[0]
+ except SubprocessError:
+ result = ''
+
+ match = re.search(r'^OpenSSL (?P<version>[0-9]+\.[0-9]+\.[0-9]+)', result)
+
+ if match:
+ version = str_to_version(match.group('version'))
+
+ display.info(f'Detected OpenSSL version {version_to_str(version)} using the openssl CLI.', verbosity=1)
+
+ return version
+
+ display.info('Unable to detect OpenSSL version.', verbosity=1)
+
+ return None
diff --git a/test/lib/ansible_test/_internal/ssh.py b/test/lib/ansible_test/_internal/ssh.py
new file mode 100644
index 0000000..840edf6
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ssh.py
@@ -0,0 +1,299 @@
+"""High level functions for working with SSH."""
+from __future__ import annotations
+
+import dataclasses
+import itertools
+import json
+import os
+import random
+import re
+import subprocess
+import shlex
+import typing as t
+
+from .encoding import (
+ to_bytes,
+ to_text,
+)
+
+from .util import (
+ ApplicationError,
+ common_environment,
+ display,
+ exclude_none_values,
+ sanitize_host_name,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+
+@dataclasses.dataclass
+class SshConnectionDetail:
+ """Information needed to establish an SSH connection to a host."""
+ name: str
+ host: str
+ port: t.Optional[int]
+ user: str
+ identity_file: str
+ python_interpreter: t.Optional[str] = None
+ shell_type: t.Optional[str] = None
+ enable_rsa_sha1: bool = False
+
+ def __post_init__(self) -> None:
+ self.name = sanitize_host_name(self.name)
+
+ @property
+ def options(self) -> dict[str, str]:
+ """OpenSSH config options, which can be passed to the `ssh` CLI with the `-o` argument."""
+ options: dict[str, str] = {}
+
+ if self.enable_rsa_sha1:
+ # Newer OpenSSH clients connecting to older SSH servers must explicitly enable ssh-rsa support.
+ # OpenSSH 8.8, released on 2021-09-26, deprecated using RSA with the SHA-1 hash algorithm (ssh-rsa).
+ # OpenSSH 7.2, released on 2016-02-29, added support for using RSA with SHA-256/512 hash algorithms.
+ # See: https://www.openssh.com/txt/release-8.8
+ algorithms = '+ssh-rsa' # append the algorithm to the default list, requires OpenSSH 7.0 or later
+
+ options.update(dict(
+ # Host key signature algorithms that the client wants to use.
+ # Available options can be found with `ssh -Q HostKeyAlgorithms` or `ssh -Q key` on older clients.
+ # This option was updated in OpenSSH 7.0, released on 2015-08-11, to support the "+" prefix.
+ # See: https://www.openssh.com/txt/release-7.0
+ HostKeyAlgorithms=algorithms,
+ # Signature algorithms that will be used for public key authentication.
+ # Available options can be found with `ssh -Q PubkeyAcceptedAlgorithms` or `ssh -Q key` on older clients.
+ # This option was added in OpenSSH 7.0, released on 2015-08-11.
+ # See: https://www.openssh.com/txt/release-7.0
+ # This option is an alias for PubkeyAcceptedAlgorithms, which was added in OpenSSH 8.5.
+ # See: https://www.openssh.com/txt/release-8.5
+ PubkeyAcceptedKeyTypes=algorithms,
+ ))
+
+ return options
+
+
+class SshProcess:
+ """Wrapper around an SSH process."""
+ def __init__(self, process: t.Optional[subprocess.Popen]) -> None:
+ self._process = process
+ self.pending_forwards: t.Optional[list[tuple[str, int]]] = None
+
+ self.forwards: dict[tuple[str, int], int] = {}
+
+ def terminate(self) -> None:
+ """Terminate the SSH process."""
+ if not self._process:
+ return # explain mode
+
+ # noinspection PyBroadException
+ try:
+ self._process.terminate()
+ except Exception: # pylint: disable=broad-except
+ pass
+
+ def wait(self) -> None:
+ """Wait for the SSH process to terminate."""
+ if not self._process:
+ return # explain mode
+
+ self._process.wait()
+
+ def collect_port_forwards(self) -> dict[tuple[str, int], int]:
+ """Collect port assignments for dynamic SSH port forwards."""
+ errors: list[str] = []
+
+ display.info('Collecting %d SSH port forward(s).' % len(self.pending_forwards), verbosity=2)
+
+ while self.pending_forwards:
+ if self._process:
+ line_bytes = self._process.stderr.readline()
+
+ if not line_bytes:
+ if errors:
+ details = ':\n%s' % '\n'.join(errors)
+ else:
+ details = '.'
+
+ raise ApplicationError('SSH port forwarding failed%s' % details)
+
+ line = to_text(line_bytes).strip()
+
+ match = re.search(r'^Allocated port (?P<src_port>[0-9]+) for remote forward to (?P<dst_host>[^:]+):(?P<dst_port>[0-9]+)$', line)
+
+ if not match:
+ if re.search(r'^Warning: Permanently added .* to the list of known hosts\.$', line):
+ continue
+
+ display.warning('Unexpected SSH port forwarding output: %s' % line, verbosity=2)
+
+ errors.append(line)
+ continue
+
+ src_port = int(match.group('src_port'))
+ dst_host = str(match.group('dst_host'))
+ dst_port = int(match.group('dst_port'))
+
+ dst = (dst_host, dst_port)
+ else:
+ # explain mode
+ dst = self.pending_forwards[0]
+ src_port = random.randint(40000, 50000)
+
+ self.pending_forwards.remove(dst)
+ self.forwards[dst] = src_port
+
+ display.info('Collected %d SSH port forward(s):\n%s' % (
+ len(self.forwards), '\n'.join('%s -> %s:%s' % (src_port, dst[0], dst[1]) for dst, src_port in sorted(self.forwards.items()))), verbosity=2)
+
+ return self.forwards
+
+
+def create_ssh_command(
+ ssh: SshConnectionDetail,
+ options: t.Optional[dict[str, t.Union[str, int]]] = None,
+ cli_args: t.Optional[list[str]] = None,
+ command: t.Optional[str] = None,
+) -> list[str]:
+ """Create an SSH command using the specified options."""
+ cmd = [
+ 'ssh',
+ '-n', # prevent reading from stdin
+ '-i', ssh.identity_file, # file from which the identity for public key authentication is read
+ ]
+
+ if not command:
+ cmd.append('-N') # do not execute a remote command
+
+ if ssh.port:
+ cmd.extend(['-p', str(ssh.port)]) # port to connect to on the remote host
+
+ if ssh.user:
+ cmd.extend(['-l', ssh.user]) # user to log in as on the remote machine
+
+ ssh_options: dict[str, t.Union[int, str]] = dict(
+ BatchMode='yes',
+ ExitOnForwardFailure='yes',
+ LogLevel='ERROR',
+ ServerAliveCountMax=4,
+ ServerAliveInterval=15,
+ StrictHostKeyChecking='no',
+ UserKnownHostsFile='/dev/null',
+ )
+
+ ssh_options.update(options or {})
+
+ cmd.extend(ssh_options_to_list(ssh_options))
+ cmd.extend(cli_args or [])
+ cmd.append(ssh.host)
+
+ if command:
+ cmd.append(command)
+
+ return cmd
+
+
+def ssh_options_to_list(options: t.Union[dict[str, t.Union[int, str]], dict[str, str]]) -> list[str]:
+ """Format a dictionary of SSH options as a list suitable for passing to the `ssh` command."""
+ return list(itertools.chain.from_iterable(
+ ('-o', f'{key}={value}') for key, value in sorted(options.items())
+ ))
+
+
+def ssh_options_to_str(options: t.Union[dict[str, t.Union[int, str]], dict[str, str]]) -> str:
+ """Format a dictionary of SSH options as a string suitable for passing as `ansible_ssh_extra_args` in inventory."""
+ return shlex.join(ssh_options_to_list(options))
+
+
+def run_ssh_command(
+ args: EnvironmentConfig,
+ ssh: SshConnectionDetail,
+ options: t.Optional[dict[str, t.Union[str, int]]] = None,
+ cli_args: t.Optional[list[str]] = None,
+ command: t.Optional[str] = None,
+) -> SshProcess:
+ """Run the specified SSH command, returning the created SshProcess instance created."""
+ cmd = create_ssh_command(ssh, options, cli_args, command)
+ env = common_environment()
+
+ cmd_show = shlex.join(cmd)
+ display.info('Run background command: %s' % cmd_show, verbosity=1, truncate=True)
+
+ cmd_bytes = [to_bytes(arg) for arg in cmd]
+ env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items())
+
+ if args.explain:
+ process = SshProcess(None)
+ else:
+ process = SshProcess(subprocess.Popen(cmd_bytes, env=env_bytes, bufsize=-1, # pylint: disable=consider-using-with
+ stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE))
+
+ return process
+
+
+def create_ssh_port_forwards(
+ args: EnvironmentConfig,
+ ssh: SshConnectionDetail,
+ forwards: list[tuple[str, int]],
+) -> SshProcess:
+ """
+ Create SSH port forwards using the provided list of tuples (target_host, target_port).
+ Port bindings will be automatically assigned by SSH and must be collected with a subsequent call to collect_port_forwards.
+ """
+ options: dict[str, t.Union[str, int]] = dict(
+ LogLevel='INFO', # info level required to get messages on stderr indicating the ports assigned to each forward
+ )
+
+ cli_args = []
+
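+ # a bind port of 0 asks the server to allocate each listening port; the allocations are
+ # reported on stderr (see LogLevel above) and parsed later by collect_port_forwards()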
+ for forward_host, forward_port in forwards:
+ cli_args.extend(['-R', ':'.join([str(0), forward_host, str(forward_port)])])
+
+ process = run_ssh_command(args, ssh, options, cli_args)
+ process.pending_forwards = forwards
+
+ return process
+
+
+def create_ssh_port_redirects(
+ args: EnvironmentConfig,
+ ssh: SshConnectionDetail,
+ redirects: list[tuple[int, str, int]],
+) -> SshProcess:
+ """Create SSH port redirections using the provided list of tuples (bind_port, target_host, target_port)."""
+ options: dict[str, t.Union[str, int]] = {}
+ cli_args = []
+
+ for bind_port, target_host, target_port in redirects:
+ cli_args.extend(['-R', ':'.join([str(bind_port), target_host, str(target_port)])])
+
+ process = run_ssh_command(args, ssh, options, cli_args)
+
+ return process
+
+
+def generate_ssh_inventory(ssh_connections: list[SshConnectionDetail]) -> str:
+ """Return an inventory file in JSON format, created from the provided SSH connection details."""
+ inventory = dict(
+ all=dict(
+ hosts=dict((ssh.name, exclude_none_values(dict(
+ ansible_host=ssh.host,
+ ansible_port=ssh.port,
+ ansible_user=ssh.user,
+ ansible_ssh_private_key_file=os.path.abspath(ssh.identity_file),
+ ansible_connection='ssh',
+ ansible_pipelining='yes',
+ ansible_python_interpreter=ssh.python_interpreter,
+ ansible_shell_type=ssh.shell_type,
+ ansible_ssh_extra_args=ssh_options_to_str(dict(UserKnownHostsFile='/dev/null', **ssh.options)), # avoid changing the test environment
+ ansible_ssh_host_key_checking='no',
+ ))) for ssh in ssh_connections),
+ ),
+ )
+
+ inventory_text = json.dumps(inventory, indent=4, sort_keys=True)
+
+ display.info('>>> SSH Inventory\n%s' % inventory_text, verbosity=3)
+
+ return inventory_text
diff --git a/test/lib/ansible_test/_internal/target.py b/test/lib/ansible_test/_internal/target.py
new file mode 100644
index 0000000..8041148
--- /dev/null
+++ b/test/lib/ansible_test/_internal/target.py
@@ -0,0 +1,707 @@
+"""Test target identification, iteration and inclusion/exclusion."""
+from __future__ import annotations
+
+import collections
+import collections.abc as c
+import enum
+import os
+import re
+import itertools
+import abc
+import typing as t
+
+from .encoding import (
+ to_bytes,
+ to_text,
+)
+
+from .io import (
+ read_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ display,
+ read_lines_without_comments,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+ content_plugins,
+)
+
+MODULE_EXTENSIONS = '.py', '.ps1'
+
+
+def find_target_completion(target_func: c.Callable[[], c.Iterable[CompletionTarget]], prefix: str, short: bool) -> list[str]:
+ """Return a list of targets from the given target function which match the given prefix."""
+ try:
+ targets = target_func()
+ matches = list(walk_completion_targets(targets, prefix, short))
+ return matches
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ return ['%s' % ex]
+
+
+def walk_completion_targets(targets: c.Iterable[CompletionTarget], prefix: str, short: bool = False) -> tuple[str, ...]:
+ """Return a tuple of targets from the given target iterable which match the given prefix."""
+ aliases = set(alias for target in targets for alias in target.aliases)
+
+ if prefix.endswith('/') and prefix in aliases:
+ aliases.remove(prefix)
+
+ matches = [alias for alias in aliases if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]]
+
+ if short:
+ offset = len(os.path.dirname(prefix))
+ if offset:
+ offset += 1
+ relative_matches = [match[offset:] for match in matches if len(match) > offset]
+ if len(relative_matches) > 1:
+ matches = relative_matches
+
+ return tuple(sorted(matches))
+
+
+def walk_internal_targets(
+ targets: c.Iterable[TCompletionTarget],
+ includes: t.Optional[list[str]] = None,
+ excludes: t.Optional[list[str]] = None,
+ requires: t.Optional[list[str]] = None,
+) -> tuple[TCompletionTarget, ...]:
+ """Return a tuple of matching completion targets."""
+ targets = tuple(targets)
+
+ include_targets = sorted(filter_targets(targets, includes), key=lambda include_target: include_target.name)
+
+ if requires:
+ require_targets = set(filter_targets(targets, requires))
+ include_targets = [require_target for require_target in include_targets if require_target in require_targets]
+
+ if excludes:
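+        # Force evaluation of the generator so that unmatched exclude patterns raise TargetPatternsNotMatched.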
+ list(filter_targets(targets, excludes, include=False))
+
+ internal_targets = set(filter_targets(include_targets, excludes, errors=False, include=False))
+ return tuple(sorted(internal_targets, key=lambda sort_target: sort_target.name))
+
+
+def filter_targets(targets: c.Iterable[TCompletionTarget],
+ patterns: list[str],
+ include: bool = True,
+ errors: bool = True,
+ ) -> c.Iterable[TCompletionTarget]:
+ """Iterate over the given targets and filter them based on the supplied arguments."""
+ unmatched = set(patterns or ())
+ compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None
+
+ for target in targets:
+ matched_directories = set()
+ match = False
+
+ if patterns:
+ for alias in target.aliases:
+ for pattern in patterns:
+ if compiled_patterns[pattern].match(alias):
+ match = True
+
+ try:
+ unmatched.remove(pattern)
+ except KeyError:
+ pass
+
+ if alias.endswith('/'):
+ if target.base_path and len(target.base_path) > len(alias):
+ matched_directories.add(target.base_path)
+ else:
+ matched_directories.add(alias)
+ elif include:
+ match = True
+ if not target.base_path:
+ matched_directories.add('.')
+ for alias in target.aliases:
+ if alias.endswith('/'):
+ if target.base_path and len(target.base_path) > len(alias):
+ matched_directories.add(target.base_path)
+ else:
+ matched_directories.add(alias)
+
+ if match != include:
+ continue
+
+ yield target
+
+ if errors:
+ if unmatched:
+ raise TargetPatternsNotMatched(unmatched)
+
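+# Illustrative sketch: patterns are matched as anchored regular expressions against
+# each target's aliases, so 'setup_.*' selects every target having an alias that
+# starts with 'setup_':
+#
+#     matched = list(filter_targets(targets, ['setup_.*']))
+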
+
+def walk_module_targets() -> c.Iterable[TestTarget]:
+ """Iterate through the module test targets."""
+ for target in walk_test_targets(path=data_context().content.module_path, module_path=data_context().content.module_path, extensions=MODULE_EXTENSIONS):
+ if not target.module:
+ continue
+
+ yield target
+
+
+def walk_units_targets() -> c.Iterable[TestTarget]:
+ """Return an iterable of units targets."""
+ return walk_test_targets(path=data_context().content.unit_path, module_path=data_context().content.unit_module_path, extensions=('.py',), prefix='test_')
+
+
+def walk_compile_targets(include_symlinks: bool = True) -> c.Iterable[TestTarget]:
+ """Return an iterable of compile targets."""
+ return walk_test_targets(module_path=data_context().content.module_path, extensions=('.py',), extra_dirs=('bin',), include_symlinks=include_symlinks)
+
+
+def walk_powershell_targets(include_symlinks: bool = True) -> c.Iterable[TestTarget]:
+ """Return an iterable of PowerShell targets."""
+ return walk_test_targets(module_path=data_context().content.module_path, extensions=('.ps1', '.psm1'), include_symlinks=include_symlinks)
+
+
+def walk_sanity_targets() -> c.Iterable[TestTarget]:
+ """Return an iterable of sanity targets."""
+ return walk_test_targets(module_path=data_context().content.module_path, include_symlinks=True, include_symlinked_directories=True)
+
+
+def walk_posix_integration_targets(include_hidden: bool = False) -> c.Iterable[IntegrationTarget]:
+ """Return an iterable of POSIX integration targets."""
+ for target in walk_integration_targets():
+ if 'posix/' in target.aliases or (include_hidden and 'hidden/posix/' in target.aliases):
+ yield target
+
+
+def walk_network_integration_targets(include_hidden: bool = False) -> c.Iterable[IntegrationTarget]:
+ """Return an iterable of network integration targets."""
+ for target in walk_integration_targets():
+ if 'network/' in target.aliases or (include_hidden and 'hidden/network/' in target.aliases):
+ yield target
+
+
+def walk_windows_integration_targets(include_hidden: bool = False) -> c.Iterable[IntegrationTarget]:
+ """Return an iterable of windows integration targets."""
+ for target in walk_integration_targets():
+ if 'windows/' in target.aliases or (include_hidden and 'hidden/windows/' in target.aliases):
+ yield target
+
+
+def walk_integration_targets() -> c.Iterable[IntegrationTarget]:
+ """Return an iterable of integration targets."""
+ path = data_context().content.integration_targets_path
+ modules = frozenset(target.module for target in walk_module_targets())
+ paths = data_context().content.walk_files(path)
+ prefixes = load_integration_prefixes()
+ targets_path_tuple = tuple(path.split(os.path.sep))
+
+ entry_dirs = (
+ 'defaults',
+ 'files',
+ 'handlers',
+ 'meta',
+ 'tasks',
+ 'templates',
+ 'vars',
+ )
+
+ entry_files = (
+ 'main.yml',
+ 'main.yaml',
+ )
+
+ entry_points = []
+
+ for entry_dir in entry_dirs:
+ for entry_file in entry_files:
+ entry_points.append(os.path.join(os.path.sep, entry_dir, entry_file))
+
+ # any directory with at least one file is a target
+ path_tuples = set(tuple(os.path.dirname(p).split(os.path.sep))
+ for p in paths)
+
+ # also detect targets which are ansible roles, looking for standard entry points
+ path_tuples.update(tuple(os.path.dirname(os.path.dirname(p)).split(os.path.sep))
+ for p in paths if any(p.endswith(entry_point) for entry_point in entry_points))
+
+ # remove the top-level directory if it was included
+ if targets_path_tuple in path_tuples:
+ path_tuples.remove(targets_path_tuple)
+
+ previous_path_tuple = None
+ paths = []
+
+ for path_tuple in sorted(path_tuples):
+ if previous_path_tuple and previous_path_tuple == path_tuple[:len(previous_path_tuple)]:
+ # ignore nested directories
+ continue
+
+ previous_path_tuple = path_tuple
+ paths.append(os.path.sep.join(path_tuple))
+
+ for path in paths:
+ yield IntegrationTarget(to_text(path), modules, prefixes)
+
+
+def load_integration_prefixes() -> dict[str, str]:
+ """Load and return the integration test prefixes."""
+ path = data_context().content.integration_path
+ file_paths = sorted(f for f in data_context().content.get_files(path) if os.path.splitext(os.path.basename(f))[0] == 'target-prefixes')
+ prefixes = {}
+
+ for file_path in file_paths:
+ prefix = os.path.splitext(file_path)[1][1:]
+ prefixes.update(dict((k, prefix) for k in read_text_file(file_path).splitlines()))
+
+ return prefixes
+
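+# Illustrative sketch: the group name is taken from the file extension, so a file
+# named 'target-prefixes.network' containing the lines 'ios' and 'vyos' would yield
+# {'ios': 'network', 'vyos': 'network'}.
+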
+
+def walk_test_targets(
+ path: t.Optional[str] = None,
+ module_path: t.Optional[str] = None,
+ extensions: t.Optional[tuple[str, ...]] = None,
+ prefix: t.Optional[str] = None,
+ extra_dirs: t.Optional[tuple[str, ...]] = None,
+ include_symlinks: bool = False,
+ include_symlinked_directories: bool = False,
+) -> c.Iterable[TestTarget]:
+ """Iterate over available test targets."""
+ if path:
+ file_paths = data_context().content.walk_files(path, include_symlinked_directories=include_symlinked_directories)
+ else:
+ file_paths = data_context().content.all_files(include_symlinked_directories=include_symlinked_directories)
+
+ for file_path in file_paths:
+ name, ext = os.path.splitext(os.path.basename(file_path))
+
+ if extensions and ext not in extensions:
+ continue
+
+ if prefix and not name.startswith(prefix):
+ continue
+
+ symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
+
+ if symlink and not include_symlinks:
+ continue
+
+ yield TestTarget(to_text(file_path), module_path, prefix, path, symlink)
+
+ file_paths = []
+
+ if extra_dirs:
+ for extra_dir in extra_dirs:
+ for file_path in data_context().content.get_files(extra_dir):
+ file_paths.append(file_path)
+
+ for file_path in file_paths:
+ symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
+
+ if symlink and not include_symlinks:
+ continue
+
+ yield TestTarget(file_path, module_path, prefix, path, symlink)
+
+
+def analyze_integration_target_dependencies(integration_targets: list[IntegrationTarget]) -> dict[str, set[str]]:
+ """Analyze the given list of integration test targets and return a dictionary expressing target names and the target names which depend on them."""
+ real_target_root = os.path.realpath(data_context().content.integration_targets_path) + '/'
+
+ role_targets = [target for target in integration_targets if target.type == 'role']
+ hidden_role_target_names = set(target.name for target in role_targets if 'hidden/' in target.aliases)
+
+ dependencies: collections.defaultdict[str, set[str]] = collections.defaultdict(set)
+
+ # handle setup dependencies
+ for target in integration_targets:
+ for setup_target_name in target.setup_always + target.setup_once:
+ dependencies[setup_target_name].add(target.name)
+
+ # handle target dependencies
+ for target in integration_targets:
+ for need_target in target.needs_target:
+ dependencies[need_target].add(target.name)
+
+ # handle symlink dependencies between targets
+ # this use case is supported, but discouraged
+ for target in integration_targets:
+ for path in data_context().content.walk_files(target.path):
+ if not os.path.islink(to_bytes(path.rstrip(os.path.sep))):
+ continue
+
+ real_link_path = os.path.realpath(path)
+
+ if not real_link_path.startswith(real_target_root):
+ continue
+
+ link_target = real_link_path[len(real_target_root):].split('/')[0]
+
+ if link_target == target.name:
+ continue
+
+ dependencies[link_target].add(target.name)
+
+ # intentionally primitive analysis of role meta to avoid a dependency on pyyaml
+ # script based targets are scanned as they may execute a playbook with role dependencies
+ for target in integration_targets:
+ meta_dir = os.path.join(target.path, 'meta')
+
+ if not os.path.isdir(meta_dir):
+ continue
+
+ meta_paths = data_context().content.get_files(meta_dir)
+
+ for meta_path in meta_paths:
+ if os.path.exists(meta_path):
+                # try to decode the file as a UTF-8 string; skip it if it contains invalid chars (binary file)
+ try:
+ meta_lines = read_text_file(meta_path).splitlines()
+ except UnicodeDecodeError:
+ continue
+
+ for meta_line in meta_lines:
+ if re.search(r'^ *#.*$', meta_line):
+ continue
+
+ if not meta_line.strip():
+ continue
+
+ for hidden_target_name in hidden_role_target_names:
+ if hidden_target_name in meta_line:
+ dependencies[hidden_target_name].add(target.name)
+
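+    # Compute the transitive closure of the dependency map: keep propagating dependents until a full pass makes no changes.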
+ while True:
+ changes = 0
+
+ for dummy, dependent_target_names in dependencies.items():
+ for dependent_target_name in list(dependent_target_names):
+ new_target_names = dependencies.get(dependent_target_name)
+
+ if new_target_names:
+ for new_target_name in new_target_names:
+ if new_target_name not in dependent_target_names:
+ dependent_target_names.add(new_target_name)
+ changes += 1
+
+ if not changes:
+ break
+
+ for target_name in sorted(dependencies):
+ consumers = dependencies[target_name]
+
+ if not consumers:
+ continue
+
+ display.info('%s:' % target_name, verbosity=4)
+
+ for consumer in sorted(consumers):
+ display.info(' %s' % consumer, verbosity=4)
+
+ return dependencies
+
+
+class CompletionTarget(metaclass=abc.ABCMeta):
+ """Command-line argument completion target base class."""
+ def __init__(self) -> None:
+ self.name = ''
+ self.path = ''
+ self.base_path: t.Optional[str] = None
+ self.modules: tuple[str, ...] = tuple()
+ self.aliases: tuple[str, ...] = tuple()
+
+ def __eq__(self, other):
+ if isinstance(other, CompletionTarget):
+ return self.__repr__() == other.__repr__()
+
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ return self.name.__lt__(other.name)
+
+ def __gt__(self, other):
+ return self.name.__gt__(other.name)
+
+ def __hash__(self):
+ return hash(self.__repr__())
+
+ def __repr__(self):
+ if self.modules:
+ return '%s (%s)' % (self.name, ', '.join(self.modules))
+
+ return self.name
+
+
+class TestTarget(CompletionTarget):
+ """Generic test target."""
+ def __init__(
+ self,
+ path: str,
+ module_path: t.Optional[str],
+ module_prefix: t.Optional[str],
+ base_path: str,
+ symlink: t.Optional[bool] = None,
+ ) -> None:
+ super().__init__()
+
+ if symlink is None:
+ symlink = os.path.islink(to_bytes(path.rstrip(os.path.sep)))
+
+ self.name = path
+ self.path = path
+ self.base_path = base_path + '/' if base_path else None
+ self.symlink = symlink
+
+ name, ext = os.path.splitext(os.path.basename(self.path))
+
+ if module_path and is_subdir(path, module_path) and name != '__init__' and ext in MODULE_EXTENSIONS:
+ self.module = name[len(module_prefix or ''):].lstrip('_')
+ self.modules = (self.module,)
+ else:
+ self.module = None
+ self.modules = tuple()
+
+ aliases = [self.path, self.module]
+ parts = self.path.split('/')
+
+ for i in range(1, len(parts)):
+ alias = '%s/' % '/'.join(parts[:i])
+ aliases.append(alias)
+
+ aliases = [a for a in aliases if a]
+
+ self.aliases = tuple(sorted(aliases))
+
+
+class IntegrationTargetType(enum.Enum):
+ """Type of integration test target."""
+ CONTROLLER = enum.auto()
+ TARGET = enum.auto()
+ UNKNOWN = enum.auto()
+ CONFLICT = enum.auto()
+
+
+def extract_plugin_references(name: str, aliases: list[str]) -> list[tuple[str, str]]:
+ """Return a list of plugin references found in the given integration test target name and aliases."""
+ plugins = content_plugins()
+ found: list[tuple[str, str]] = []
+
+ for alias in [name] + aliases:
+ plugin_type = 'modules'
+ plugin_name = alias
+
+ if plugin_name in plugins.get(plugin_type, {}):
+ found.append((plugin_type, plugin_name))
+
+ parts = alias.split('_')
+
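+        # Plugin type names may contain an underscore (e.g. 'module_utils'), so try both one and two word prefixes.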
+ for type_length in (1, 2):
+ if len(parts) > type_length:
+ plugin_type = '_'.join(parts[:type_length])
+ plugin_name = '_'.join(parts[type_length:])
+
+ if plugin_name in plugins.get(plugin_type, {}):
+ found.append((plugin_type, plugin_name))
+
+ return found
+
+
+def categorize_integration_test(name: str, aliases: list[str], force_target: bool) -> tuple[IntegrationTargetType, IntegrationTargetType]:
+ """Return the integration test target types (used and actual) based on the given target name and aliases."""
+ context_controller = f'context/{IntegrationTargetType.CONTROLLER.name.lower()}' in aliases
+ context_target = f'context/{IntegrationTargetType.TARGET.name.lower()}' in aliases or force_target
+ actual_type = None
+ strict_mode = data_context().content.is_ansible
+
+ if context_controller and context_target:
+ target_type = IntegrationTargetType.CONFLICT
+ elif context_controller and not context_target:
+ target_type = IntegrationTargetType.CONTROLLER
+ elif context_target and not context_controller:
+ target_type = IntegrationTargetType.TARGET
+ else:
+ target_types = {IntegrationTargetType.TARGET if plugin_type in ('modules', 'module_utils') else IntegrationTargetType.CONTROLLER
+ for plugin_type, plugin_name in extract_plugin_references(name, aliases)}
+
+ if len(target_types) == 1:
+ target_type = target_types.pop()
+ elif not target_types:
+ actual_type = IntegrationTargetType.UNKNOWN
+ target_type = actual_type if strict_mode else IntegrationTargetType.TARGET
+ else:
+ target_type = IntegrationTargetType.CONFLICT
+
+ return target_type, actual_type or target_type
+
+
+class IntegrationTarget(CompletionTarget):
+ """Integration test target."""
+ non_posix = frozenset((
+ 'network',
+ 'windows',
+ ))
+
+ categories = frozenset(non_posix | frozenset((
+ 'posix',
+ 'module',
+ 'needs',
+ 'skip',
+ )))
+
+ def __init__(self, path: str, modules: frozenset[str], prefixes: dict[str, str]) -> None:
+ super().__init__()
+
+ self.relative_path = os.path.relpath(path, data_context().content.integration_targets_path)
+ self.name = self.relative_path.replace(os.path.sep, '.')
+ self.path = path
+
+ # script_path and type
+
+ file_paths = data_context().content.get_files(path)
+ runme_path = os.path.join(path, 'runme.sh')
+
+ if runme_path in file_paths:
+ self.type = 'script'
+ self.script_path = runme_path
+ else:
+ self.type = 'role' # ansible will consider these empty roles, so ansible-test should as well
+ self.script_path = None
+
+ # static_aliases
+
+ aliases_path = os.path.join(path, 'aliases')
+
+ if aliases_path in file_paths:
+ static_aliases = tuple(read_lines_without_comments(aliases_path, remove_blank_lines=True))
+ else:
+ static_aliases = tuple()
+
+ # modules
+
+ if self.name in modules:
+ module_name = self.name
+ elif self.name.startswith('win_') and self.name[4:] in modules:
+ module_name = self.name[4:]
+ else:
+ module_name = None
+
+ self.modules = tuple(sorted(a for a in static_aliases + tuple([module_name]) if a in modules))
+
+ # groups
+
+ groups = [self.type]
+ groups += [a for a in static_aliases if a not in modules]
+ groups += ['module/%s' % m for m in self.modules]
+
+ if data_context().content.is_ansible and (self.name == 'ansible-test' or self.name.startswith('ansible-test-')):
+ groups.append('ansible-test')
+
+ if not self.modules:
+ groups.append('non_module')
+
+ if 'destructive' not in groups:
+ groups.append('non_destructive')
+
+ if 'needs/httptester' in groups:
+ groups.append('cloud/httptester') # backwards compatibility for when it was not a cloud plugin
+
+ if '_' in self.name:
+ prefix = self.name[:self.name.find('_')]
+ else:
+ prefix = None
+
+ if prefix in prefixes:
+ group = prefixes[prefix]
+
+ if group != prefix:
+ group = '%s/%s' % (group, prefix)
+
+ groups.append(group)
+
+ if self.name.startswith('win_'):
+ groups.append('windows')
+
+ if self.name.startswith('connection_'):
+ groups.append('connection')
+
+ if self.name.startswith('setup_') or self.name.startswith('prepare_'):
+ groups.append('hidden')
+
+ if self.type not in ('script', 'role'):
+ groups.append('hidden')
+
+ targets_relative_path = data_context().content.integration_targets_path
+
+ # Collect skip entries before group expansion to avoid registering more specific skip entries as less specific versions.
+ self.skips = tuple(g for g in groups if g.startswith('skip/'))
+
+ # Collect file paths before group expansion to avoid including the directories.
+ # Ignore references to test targets, as those must be defined using `needs/target/*` or other target references.
+ self.needs_file = tuple(sorted(set('/'.join(g.split('/')[2:]) for g in groups if
+ g.startswith('needs/file/') and not g.startswith('needs/file/%s/' % targets_relative_path))))
+
+ # network platform
+ networks = [g.split('/')[1] for g in groups if g.startswith('network/')]
+ self.network_platform = networks[0] if networks else None
+
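+        # Expand each group into its parent groups (e.g. 'needs/target/foo' also produces 'needs' and 'needs/target').
+        # The islice call fixes the iteration length up front, so groups appended during the loop are not re-expanded.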
+ for group in itertools.islice(groups, 0, len(groups)):
+ if '/' in group:
+ parts = group.split('/')
+ for i in range(1, len(parts)):
+ groups.append('/'.join(parts[:i]))
+
+ if not any(g in self.non_posix for g in groups):
+ groups.append('posix')
+
+ # target type
+
+        # non-posix targets are tested against the target host, even if they also support posix
+ force_target = any(group in self.non_posix for group in groups)
+
+ target_type, actual_type = categorize_integration_test(self.name, list(static_aliases), force_target)
+
+ groups.extend(['context/', f'context/{target_type.name.lower()}'])
+
+ if target_type != actual_type:
+ # allow users to query for the actual type
+ groups.extend(['context/', f'context/{actual_type.name.lower()}'])
+
+ self.target_type = target_type
+ self.actual_type = actual_type
+
+ # aliases
+
+ aliases = [self.name] + \
+ ['%s/' % g for g in groups] + \
+ ['%s/%s' % (g, self.name) for g in groups if g not in self.categories]
+
+ if 'hidden/' in aliases:
+ aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')]
+
+ self.aliases = tuple(sorted(set(aliases)))
+
+ # configuration
+
+ self.retry_never = 'retry/never/' in self.aliases
+
+ self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/'))))
+ self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/'))))
+ self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/'))))
+
+
+class TargetPatternsNotMatched(ApplicationError):
+ """One or more targets were not matched when a match was required."""
+ def __init__(self, patterns: set[str]) -> None:
+ self.patterns = sorted(patterns)
+
+ if len(patterns) > 1:
+ message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns)
+ else:
+ message = 'Target pattern not matched: %s' % self.patterns[0]
+
+ super().__init__(message)
+
+
+TCompletionTarget = t.TypeVar('TCompletionTarget', bound=CompletionTarget)
+TIntegrationTarget = t.TypeVar('TIntegrationTarget', bound=IntegrationTarget)
diff --git a/test/lib/ansible_test/_internal/test.py b/test/lib/ansible_test/_internal/test.py
new file mode 100644
index 0000000..211635c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/test.py
@@ -0,0 +1,469 @@
+"""Classes for storing and processing test results."""
+from __future__ import annotations
+
+import collections.abc as c
+import datetime
+import typing as t
+
+from .util import (
+ display,
+)
+
+from .util_common import (
+ get_docs_url,
+ write_text_test_results,
+ write_json_test_results,
+ ResultType,
+)
+
+from .metadata import (
+ Metadata,
+)
+
+from .config import (
+ TestConfig,
+)
+
+from . import junit_xml
+
+
+def calculate_best_confidence(choices: tuple[tuple[str, int], ...], metadata: Metadata) -> int:
+ """Return the best confidence value available from the given choices and metadata."""
+ best_confidence = 0
+
+ for path, line in choices:
+ confidence = calculate_confidence(path, line, metadata)
+ best_confidence = max(confidence, best_confidence)
+
+ return best_confidence
+
+
+def calculate_confidence(path: str, line: int, metadata: Metadata) -> int:
+ """Return the confidence level for a test result associated with the given file path and line number."""
+ ranges = metadata.changes.get(path)
+
+ # no changes were made to the file
+ if not ranges:
+ return 0
+
+ # changes were made to the same file and line
+    if any(r[0] <= line <= r[1] for r in ranges):
+ return 100
+
+ # changes were made to the same file and the line number is unknown
+ if line == 0:
+ return 75
+
+ # changes were made to the same file and the line number is different
+ return 50
+
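+# Illustrative sketch: given metadata.changes == {'foo.py': [(10, 20)]}, then
+# calculate_confidence('foo.py', 15, metadata) returns 100, line 0 returns 75,
+# line 5 returns 50, and any path without changes returns 0.
+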
+
+class TestResult:
+ """Base class for test results."""
+ def __init__(self, command: str, test: str, python_version: t.Optional[str] = None) -> None:
+ self.command = command
+ self.test = test
+ self.python_version = python_version
+ self.name = self.test or self.command
+
+ if self.python_version:
+ self.name += '-python-%s' % self.python_version
+
+ def write(self, args: TestConfig) -> None:
+ """Write the test results to various locations."""
+ self.write_console()
+ self.write_bot(args)
+
+ if args.lint:
+ self.write_lint()
+
+ if args.junit:
+ self.write_junit(args)
+
+ def write_console(self) -> None:
+ """Write results to console."""
+
+ def write_lint(self) -> None:
+ """Write lint results to stdout."""
+
+ def write_bot(self, args: TestConfig) -> None:
+ """Write results to a file for ansibullbot to consume."""
+
+ def write_junit(self, args: TestConfig) -> None:
+ """Write results to a junit XML file."""
+
+ def create_result_name(self, extension: str) -> str:
+ """Return the name of the result file using the given extension."""
+ name = 'ansible-test-%s' % self.command
+
+ if self.test:
+ name += '-%s' % self.test
+
+ if self.python_version:
+ name += '-python-%s' % self.python_version
+
+ name += extension
+
+ return name
+
+ def save_junit(self, args: TestConfig, test_case: junit_xml.TestCase) -> None:
+ """Save the given test case results to disk as JUnit XML."""
+ suites = junit_xml.TestSuites(
+ suites=[
+ junit_xml.TestSuite(
+ name='ansible-test',
+ cases=[test_case],
+ timestamp=datetime.datetime.utcnow(),
+ ),
+ ],
+ )
+
+ report = suites.to_pretty_xml()
+
+ if args.explain:
+ return
+
+ write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
+
+
+class TestTimeout(TestResult):
+ """Test timeout."""
+ def __init__(self, timeout_duration: int) -> None:
+ super().__init__(command='timeout', test='')
+
+ self.timeout_duration = timeout_duration
+
+ def write(self, args: TestConfig) -> None:
+ """Write the test results to various locations."""
+ message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
+
+ # Include a leading newline to improve readability on Shippable "Tests" tab.
+ # Without this, the first line becomes indented.
+ output = '''
+One or more of the following situations may be responsible:
+
+- Code changes have resulted in tests that hang or run for an excessive amount of time.
+- Tests have been added which exceed the time limit when combined with existing tests.
+- Test infrastructure and/or external dependencies are operating slower than normal.'''
+
+ if args.coverage:
+ output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
+
+ output += '\n\nConsult the console log for additional details on where the timeout occurred.'
+
+ timestamp = datetime.datetime.utcnow()
+
+ suites = junit_xml.TestSuites(
+ suites=[
+ junit_xml.TestSuite(
+ name='ansible-test',
+ timestamp=timestamp,
+ cases=[
+ junit_xml.TestCase(
+ name='timeout',
+ classname='timeout',
+ errors=[
+ junit_xml.TestError(
+ message=message,
+ ),
+ ],
+ ),
+ ],
+ )
+ ],
+ )
+
+ report = suites.to_pretty_xml()
+
+ write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
+
+
+class TestSuccess(TestResult):
+ """Test success."""
+ def write_junit(self, args: TestConfig) -> None:
+ """Write results to a junit XML file."""
+ test_case = junit_xml.TestCase(classname=self.command, name=self.name)
+
+ self.save_junit(args, test_case)
+
+
+class TestSkipped(TestResult):
+ """Test skipped."""
+ def __init__(self, command: str, test: str, python_version: t.Optional[str] = None) -> None:
+ super().__init__(command, test, python_version)
+
+ self.reason: t.Optional[str] = None
+
+ def write_console(self) -> None:
+ """Write results to console."""
+ if self.reason:
+ display.warning(self.reason)
+ else:
+ display.info('No tests applicable.', verbosity=1)
+
+ def write_junit(self, args: TestConfig) -> None:
+ """Write results to a junit XML file."""
+ test_case = junit_xml.TestCase(
+ classname=self.command,
+ name=self.name,
+ skipped=self.reason or 'No tests applicable.',
+ )
+
+ self.save_junit(args, test_case)
+
+
+class TestFailure(TestResult):
+ """Test failure."""
+ def __init__(
+ self,
+ command: str,
+ test: str,
+ python_version: t.Optional[str] = None,
+ messages: t.Optional[c.Sequence[TestMessage]] = None,
+ summary: t.Optional[str] = None,
+ ):
+ super().__init__(command, test, python_version)
+
+ if messages:
+ messages = sorted(messages)
+ else:
+ messages = []
+
+ self.messages = messages
+ self.summary = summary
+
+ def write(self, args: TestConfig) -> None:
+ """Write the test results to various locations."""
+ if args.metadata.changes:
+ self.populate_confidence(args.metadata)
+
+ super().write(args)
+
+ def write_console(self) -> None:
+ """Write results to console."""
+ if self.summary:
+ display.error(self.summary)
+ else:
+ if self.python_version:
+ specifier = ' on python %s' % self.python_version
+ else:
+ specifier = ''
+
+ display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))
+
+ for message in self.messages:
+ display.error(message.format(show_confidence=True))
+
+ doc_url = self.find_docs()
+ if doc_url:
+ display.info('See documentation for help: %s' % doc_url)
+
+ def write_lint(self) -> None:
+ """Write lint results to stdout."""
+ if self.summary:
+ command = self.format_command()
+ message = 'The test `%s` failed. See stderr output for details.' % command
+ path = ''
+ message = TestMessage(message, path)
+ print(message) # display goes to stderr, this should be on stdout
+ else:
+ for message in self.messages:
+ print(message) # display goes to stderr, this should be on stdout
+
+ def write_junit(self, args: TestConfig) -> None:
+ """Write results to a junit XML file."""
+ title = self.format_title()
+ output = self.format_block()
+
+ test_case = junit_xml.TestCase(
+ classname=self.command,
+ name=self.name,
+ failures=[
+ junit_xml.TestFailure(
+ message=title,
+ output=output,
+ ),
+ ],
+ )
+
+ self.save_junit(args, test_case)
+
+ def write_bot(self, args: TestConfig) -> None:
+ """Write results to a file for ansibullbot to consume."""
+ docs = self.find_docs()
+ message = self.format_title(help_link=docs)
+ output = self.format_block()
+
+ if self.messages:
+ verified = all((m.confidence or 0) >= 50 for m in self.messages)
+ else:
+ verified = False
+
+ bot_data = dict(
+ verified=verified,
+ docs=docs,
+ results=[
+ dict(
+ message=message,
+ output=output,
+ ),
+ ],
+ )
+
+ if args.explain:
+ return
+
+ write_json_test_results(ResultType.BOT, self.create_result_name('.json'), bot_data)
+
+ def populate_confidence(self, metadata: Metadata) -> None:
+ """Populate test result confidence using the provided metadata."""
+ for message in self.messages:
+ if message.confidence is None:
+ message.confidence = calculate_confidence(message.path, message.line, metadata)
+
+ def format_command(self) -> str:
+ """Return a string representing the CLI command associated with the test failure."""
+ command = 'ansible-test %s' % self.command
+
+ if self.test:
+ command += ' --test %s' % self.test
+
+ if self.python_version:
+ command += ' --python %s' % self.python_version
+
+ return command
+
+ def find_docs(self) -> t.Optional[str]:
+ """Return the docs URL for this test or None if there is no docs URL."""
+ if self.command != 'sanity':
+ return None # only sanity tests have docs links
+
+ filename = f'{self.test}.html' if self.test else ''
+ url = get_docs_url(f'https://docs.ansible.com/ansible-core/devel/dev_guide/testing/{self.command}/{filename}')
+
+ return url
+
+ def format_title(self, help_link: t.Optional[str] = None) -> str:
+ """Return a string containing a title/heading for this test failure, including an optional help link to explain the test."""
+ command = self.format_command()
+
+ if self.summary:
+ reason = 'the error'
+ else:
+ reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages)
+
+ if help_link:
+ help_link_markup = ' [[explain](%s)]' % help_link
+ else:
+ help_link_markup = ''
+
+ title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason)
+
+ return title
+
+ def format_block(self) -> str:
+ """Format the test summary or messages as a block of text and return the result."""
+ if self.summary:
+ block = self.summary
+ else:
+ block = '\n'.join(m.format() for m in self.messages)
+
+ message = block.strip()
+
+ # Hack to remove ANSI color reset code from SubprocessError messages.
+ message = message.replace(display.clear, '')
+
+ return message
+
+
+class TestMessage:
+ """Single test message for one file."""
+ def __init__(
+ self,
+ message: str,
+ path: str,
+ line: int = 0,
+ column: int = 0,
+ level: str = 'error',
+ code: t.Optional[str] = None,
+ confidence: t.Optional[int] = None,
+ ):
+ self.__path = path
+ self.__line = line
+ self.__column = column
+ self.__level = level
+ self.__code = code
+ self.__message = message
+
+ self.confidence = confidence
+
+ @property
+ def path(self) -> str:
+ """Return the path."""
+ return self.__path
+
+ @property
+ def line(self) -> int:
+ """Return the line number, or 0 if none is available."""
+ return self.__line
+
+ @property
+ def column(self) -> int:
+ """Return the column number, or 0 if none is available."""
+ return self.__column
+
+ @property
+ def level(self) -> str:
+ """Return the level."""
+ return self.__level
+
+ @property
+ def code(self) -> t.Optional[str]:
+ """Return the code, if any."""
+ return self.__code
+
+ @property
+ def message(self) -> str:
+ """Return the message."""
+ return self.__message
+
+ @property
+ def tuple(self) -> tuple[str, int, int, str, t.Optional[str], str]:
+ """Return a tuple with all the immutable values of this test message."""
+ return self.__path, self.__line, self.__column, self.__level, self.__code, self.__message
+
+ def __lt__(self, other):
+ return self.tuple < other.tuple
+
+ def __le__(self, other):
+ return self.tuple <= other.tuple
+
+ def __eq__(self, other):
+ return self.tuple == other.tuple
+
+ def __ne__(self, other):
+ return self.tuple != other.tuple
+
+ def __gt__(self, other):
+ return self.tuple > other.tuple
+
+ def __ge__(self, other):
+ return self.tuple >= other.tuple
+
+ def __hash__(self):
+ return hash(self.tuple)
+
+ def __str__(self):
+ return self.format()
+
+ def format(self, show_confidence: bool = False) -> str:
+ """Return a string representation of this message, optionally including the confidence level."""
+ if self.__code:
+ msg = '%s: %s' % (self.__code, self.__message)
+ else:
+ msg = self.__message
+
+ if show_confidence and self.confidence is not None:
+ msg += ' (%d%%)' % self.confidence
+
+ return '%s:%s:%s: %s' % (self.__path, self.__line, self.__column, msg)
diff --git a/test/lib/ansible_test/_internal/thread.py b/test/lib/ansible_test/_internal/thread.py
new file mode 100644
index 0000000..edaf1b5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/thread.py
@@ -0,0 +1,82 @@
+"""Python threading tools."""
+from __future__ import annotations
+
+import collections.abc as c
+import contextlib
+import functools
+import sys
+import threading
+import queue
+import typing as t
+
+
+TCallable = t.TypeVar('TCallable', bound=t.Callable[..., t.Any])
+
+
+class WrappedThread(threading.Thread):
+ """Wrapper around Thread which captures results and exceptions."""
+ def __init__(self, action: c.Callable[[], t.Any]) -> None:
+ super().__init__()
+ self._result: queue.Queue[t.Any] = queue.Queue()
+ self.action = action
+ self.result = None
+
+ def run(self) -> None:
+ """
+ Run action and capture results or exception.
+ Do not override. Do not call directly. Executed by the start() method.
+ """
+        # We truly want to catch anything the worker thread might do, including calls to sys.exit().
+        # Therefore, we catch *everything* (including old-style class exceptions).
+ # noinspection PyBroadException
+ try:
+ self._result.put((self.action(), None))
+ # pylint: disable=locally-disabled, bare-except
+ except: # noqa
+ self._result.put((None, sys.exc_info()))
+
+ def wait_for_result(self) -> t.Any:
+ """Wait for thread to exit and return the result or raise an exception."""
+ result, exception = self._result.get()
+
+ if exception:
+ raise exception[1].with_traceback(exception[2])
+
+ self.result = result
+
+ return result
+
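+# Illustrative usage sketch (some_expensive_call is a hypothetical callable):
+#
+#     thread = WrappedThread(lambda: some_expensive_call())
+#     thread.start()
+#     result = thread.wait_for_result()  # re-raises any exception from the thread
+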
+
+def mutex(func: TCallable) -> TCallable:
+ """Enforce exclusive access on a decorated function."""
+ lock = threading.Lock()
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ """Wrapper around `func` which uses a lock to provide exclusive access to the function."""
+ with lock:
+ return func(*args, **kwargs)
+
+ return wrapper # type: ignore[return-value] # requires https://www.python.org/dev/peps/pep-0612/ support
+
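+# Illustrative usage sketch (update_shared_state is a hypothetical function):
+#
+#     @mutex
+#     def update_shared_state() -> None:
+#         ...  # only one thread at a time can execute this body
+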
+
+__named_lock = threading.Lock()
+__named_locks: dict[str, threading.Lock] = {}
+
+
+@contextlib.contextmanager
+def named_lock(name: str) -> c.Iterator[bool]:
+ """
+ Context manager that provides named locks using threading.Lock instances.
+ Once named lock instances are created they are not deleted.
+ Returns True if this is the first instance of the named lock, otherwise False.
+ """
+ with __named_lock:
+ if lock_instance := __named_locks.get(name):
+ first = False
+ else:
+ first = True
+ lock_instance = __named_locks[name] = threading.Lock()
+
+ with lock_instance:
+ yield first
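+
+# Illustrative usage sketch ('setup-example' is a made-up lock name): the first
+# caller for a given name sees first=True and can perform one-time setup while
+# holding the lock:
+#
+#     with named_lock('setup-example') as first:
+#         if first:
+#             ...  # one-time initialization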
diff --git a/test/lib/ansible_test/_internal/timeout.py b/test/lib/ansible_test/_internal/timeout.py
new file mode 100644
index 0000000..90ba583
--- /dev/null
+++ b/test/lib/ansible_test/_internal/timeout.py
@@ -0,0 +1,93 @@
+"""Timeout management for tests."""
+from __future__ import annotations
+
+import datetime
+import functools
+import os
+import signal
+import time
+import typing as t
+
+from .io import (
+ read_json_file,
+)
+
+from .config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from .util import (
+ display,
+ ApplicationError,
+)
+
+from .thread import (
+ WrappedThread,
+)
+
+from .constants import (
+ TIMEOUT_PATH,
+)
+
+from .test import (
+ TestTimeout,
+)
+
+
+def get_timeout() -> t.Optional[dict[str, t.Any]]:
+ """Return details about the currently set timeout, if any, otherwise return None."""
+ if not os.path.exists(TIMEOUT_PATH):
+ return None
+
+ data = read_json_file(TIMEOUT_PATH)
+ data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ')
+
+ return data
+
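+# The timeout file is JSON containing at least 'duration' (in minutes) and 'deadline'
+# (a UTC timestamp such as "2023-01-01T00:00:00Z", matching the format parsed above).
+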
+
+def configure_timeout(args: CommonConfig) -> None:
+ """Configure the timeout."""
+ if isinstance(args, TestConfig):
+ configure_test_timeout(args) # only tests are subject to the timeout
+
+
+def configure_test_timeout(args: TestConfig) -> None:
+ """Configure the test timeout."""
+ timeout = get_timeout()
+
+ if not timeout:
+ return
+
+ timeout_start = datetime.datetime.utcnow()
+ timeout_duration = timeout['duration']
+ timeout_deadline = timeout['deadline']
+ timeout_remaining = timeout_deadline - timeout_start
+
+ test_timeout = TestTimeout(timeout_duration)
+
+ if timeout_remaining <= datetime.timedelta():
+ test_timeout.write(args)
+
+ raise ApplicationError('The %d minute test timeout expired %s ago at %s.' % (
+ timeout_duration, timeout_remaining * -1, timeout_deadline))
+
+ display.info('The %d minute test timeout expires in %s at %s.' % (
+ timeout_duration, timeout_remaining, timeout_deadline), verbosity=1)
+
+ def timeout_handler(_dummy1: t.Any, _dummy2: t.Any) -> None:
+ """Runs when SIGUSR1 is received."""
+ test_timeout.write(args)
+
+ raise ApplicationError('Tests aborted after exceeding the %d minute time limit.' % timeout_duration)
+
+ def timeout_waiter(timeout_seconds: int) -> None:
+ """Background thread which will kill the current process if the timeout elapses."""
+ time.sleep(timeout_seconds)
+ os.kill(os.getpid(), signal.SIGUSR1)
+
+ signal.signal(signal.SIGUSR1, timeout_handler)
+
+ instance = WrappedThread(functools.partial(timeout_waiter, timeout_remaining.seconds))
+ instance.daemon = True
+ instance.start()
diff --git a/test/lib/ansible_test/_internal/util.py b/test/lib/ansible_test/_internal/util.py
new file mode 100644
index 0000000..ec485a2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/util.py
@@ -0,0 +1,1146 @@
+"""Miscellaneous utility functions and classes."""
+from __future__ import annotations
+
+import abc
+import collections.abc as c
+import enum
+import fcntl
+import importlib.util
+import inspect
+import json
+import keyword
+import os
+import platform
+import pkgutil
+import random
+import re
+import shutil
+import stat
+import string
+import subprocess
+import sys
+import time
+import functools
+import shlex
+import typing as t
+
+from struct import unpack, pack
+from termios import TIOCGWINSZ
+
+try:
+ from typing_extensions import TypeGuard # TypeGuard was added in Python 3.10
+except ImportError:
+ TypeGuard = None
+
+from .locale_util import (
+ LOCALE_WARNING,
+ CONFIGURED_LOCALE,
+)
+
+from .encoding import (
+ to_bytes,
+ to_optional_bytes,
+ to_optional_text,
+)
+
+from .io import (
+ open_binary_file,
+ read_text_file,
+)
+
+from .thread import (
+ mutex,
+ WrappedThread,
+)
+
+from .constants import (
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+C = t.TypeVar('C')
+TBase = t.TypeVar('TBase')
+TKey = t.TypeVar('TKey')
+TValue = t.TypeVar('TValue')
+
+PYTHON_PATHS: dict[str, str] = {}
+
+COVERAGE_CONFIG_NAME = 'coveragerc'
+
+ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+# assume running from install
+ANSIBLE_ROOT = os.path.dirname(ANSIBLE_TEST_ROOT)
+ANSIBLE_BIN_PATH = os.path.dirname(os.path.abspath(sys.argv[0]))
+ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'ansible')
+ANSIBLE_SOURCE_ROOT = None
+
+if not os.path.exists(ANSIBLE_LIB_ROOT):
+ # running from source
+ ANSIBLE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(ANSIBLE_TEST_ROOT)))
+ ANSIBLE_BIN_PATH = os.path.join(ANSIBLE_ROOT, 'bin')
+ ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'lib', 'ansible')
+ ANSIBLE_SOURCE_ROOT = ANSIBLE_ROOT
+
+ANSIBLE_TEST_DATA_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_data')
+ANSIBLE_TEST_UTIL_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_util')
+ANSIBLE_TEST_CONFIG_ROOT = os.path.join(ANSIBLE_TEST_ROOT, 'config')
+
+ANSIBLE_TEST_CONTROLLER_ROOT = os.path.join(ANSIBLE_TEST_UTIL_ROOT, 'controller')
+ANSIBLE_TEST_TARGET_ROOT = os.path.join(ANSIBLE_TEST_UTIL_ROOT, 'target')
+
+ANSIBLE_TEST_TOOLS_ROOT = os.path.join(ANSIBLE_TEST_CONTROLLER_ROOT, 'tools')
+ANSIBLE_TEST_TARGET_TOOLS_ROOT = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'tools')
+
+# Modes are set to allow all users the same level of access.
+# This permits files to be used in tests that change users.
+# The only exception is write access to directories for the user creating them.
+# This avoids having to modify the directory permissions a second time.
+
+MODE_READ = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
+
+MODE_FILE = MODE_READ
+MODE_FILE_EXECUTE = MODE_FILE | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+MODE_FILE_WRITE = MODE_FILE | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
+
+MODE_DIRECTORY = MODE_READ | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+MODE_DIRECTORY_WRITE = MODE_DIRECTORY | stat.S_IWGRP | stat.S_IWOTH
+
+
+class OutputStream(enum.Enum):
+ """The output stream to use when running a subprocess and redirecting/capturing stdout or stderr."""
+
+ ORIGINAL = enum.auto()
+ AUTO = enum.auto()
+
+ def get_buffer(self, original: t.BinaryIO) -> t.BinaryIO:
+ """Return the correct output buffer to use, taking into account the given original buffer."""
+
+ if self == OutputStream.ORIGINAL:
+ return original
+
+ if self == OutputStream.AUTO:
+ return display.fd.buffer
+
+ raise NotImplementedError(str(self))
+
+
+class Architecture:
+ """
+ Normalized architecture names.
+ These are the architectures supported by ansible-test, such as when provisioning remote instances.
+ """
+ X86_64 = 'x86_64'
+ AARCH64 = 'aarch64'
+
+
+REMOTE_ARCHITECTURES = list(value for key, value in Architecture.__dict__.items() if not key.startswith('__'))
+
+
+def is_valid_identifier(value: str) -> bool:
+ """Return True if the given value is a valid non-keyword Python identifier, otherwise return False."""
+ return value.isidentifier() and not keyword.iskeyword(value)
+
+
+def cache(func: c.Callable[[], TValue]) -> c.Callable[[], TValue]:
+ """Enforce exclusive access on a decorated function and cache the result."""
+ storage: dict[None, TValue] = {}
+ sentinel = object()
+
+ @functools.wraps(func)
+ def cache_func():
+ """Cache the return value from func."""
+ if (value := storage.get(None, sentinel)) is sentinel:
+ value = storage[None] = func()
+
+ return value
+
+ wrapper = mutex(cache_func)
+
+ return wrapper
+
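+# Illustrative usage sketch (load_settings is a hypothetical function): the wrapped
+# function runs at most once and subsequent calls return the cached value.
+#
+#     @cache
+#     def load_settings() -> dict[str, str]:
+#         ...  # expensive work happens only on the first call
+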
+
+@mutex
+def detect_architecture(python: str) -> t.Optional[str]:
+ """Detect the architecture of the specified Python and return a normalized version, or None if it cannot be determined."""
+ results: dict[str, t.Optional[str]]
+
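+    # Results are memoized on the function object so each interpreter is only probed once.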
+ try:
+ results = detect_architecture.results # type: ignore[attr-defined]
+ except AttributeError:
+ results = detect_architecture.results = {} # type: ignore[attr-defined]
+
+ if python in results:
+ return results[python]
+
+ if python == sys.executable or os.path.realpath(python) == os.path.realpath(sys.executable):
+ uname = platform.uname()
+ else:
+ data = raw_command([python, '-c', 'import json, platform; print(json.dumps(platform.uname()));'], capture=True)[0]
+ uname = json.loads(data)
+
+ translation = {
+ 'x86_64': Architecture.X86_64, # Linux, macOS
+ 'amd64': Architecture.X86_64, # FreeBSD
+ 'aarch64': Architecture.AARCH64, # Linux, FreeBSD
+ 'arm64': Architecture.AARCH64, # FreeBSD
+ }
+
+ candidates = []
+
+ if len(uname) >= 5:
+ candidates.append(uname[4])
+
+ if len(uname) >= 6:
+ candidates.append(uname[5])
+
+ candidates = sorted(set(candidates))
+ architectures = sorted(set(arch for arch in [translation.get(candidate) for candidate in candidates] if arch))
+
+ architecture: t.Optional[str] = None
+
+ if not architectures:
+ display.warning(f'Unable to determine architecture for Python interpreter "{python}" from: {candidates}')
+ elif len(architectures) == 1:
+ architecture = architectures[0]
+ display.info(f'Detected architecture {architecture} for Python interpreter: {python}', verbosity=1)
+ else:
+ display.warning(f'Conflicting architectures detected ({architectures}) for Python interpreter "{python}" from: {candidates}')
+
+ results[python] = architecture
+
+ return architecture
+
+
+def filter_args(args: list[str], filters: dict[str, int]) -> list[str]:
+ """Return a filtered version of the given command line arguments."""
+ remaining = 0
+ result = []
+
+ for arg in args:
+ if not arg.startswith('-') and remaining:
+ remaining -= 1
+ continue
+
+ remaining = 0
+
+ parts = arg.split('=', 1)
+ key = parts[0]
+
+ if key in filters:
+ remaining = filters[key] - len(parts) + 1
+ continue
+
+ result.append(arg)
+
+ return result
+
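+# Illustrative sketch: each filter maps an option name to the number of values it
+# consumes, so the option and its values are removed together:
+#
+#     filter_args(['ssh', '-i', 'key', '-q', 'host'], {'-i': 1})  # -> ['ssh', '-q', 'host']
+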
+
+def read_lines_without_comments(path: str, remove_blank_lines: bool = False, optional: bool = False) -> list[str]:
+ """
+ Returns lines from the specified text file with comments removed.
+ Comments are any content from a hash symbol to the end of a line.
+ Any spaces immediately before a comment are also removed.
+ """
+ if optional and not os.path.exists(path):
+ return []
+
+ lines = read_text_file(path).splitlines()
+
+ lines = [re.sub(r' *#.*$', '', line) for line in lines]
+
+ if remove_blank_lines:
+ lines = [line for line in lines if line]
+
+ return lines
+
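+# Illustrative sketch: a file containing "alpha  # comment" followed by a blank line
+# yields ['alpha', ''] by default, or just ['alpha'] with remove_blank_lines=True.
+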
+
+def exclude_none_values(data: dict[TKey, t.Optional[TValue]]) -> dict[TKey, TValue]:
+ """Return the provided dictionary with any None values excluded."""
+ return dict((key, value) for key, value in data.items() if value is not None)
+
+
+def find_executable(executable: str, cwd: t.Optional[str] = None, path: t.Optional[str] = None, required: t.Union[bool, str] = True) -> t.Optional[str]:
+ """
+ Find the specified executable and return the full path, or None if it could not be found.
+ If required is True an exception will be raised if the executable is not found.
+ If required is set to 'warning' then a warning will be shown if the executable is not found.
+ """
+ match = None
+ real_cwd = os.getcwd()
+
+ if not cwd:
+ cwd = real_cwd
+
+ if os.path.dirname(executable):
+ target = os.path.join(cwd, executable)
+ if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
+ match = executable
+ else:
+ if path is None:
+ path = os.environ.get('PATH', os.path.defpath)
+
+ if path:
+ path_dirs = path.split(os.path.pathsep)
+ seen_dirs = set()
+
+ for path_dir in path_dirs:
+ if path_dir in seen_dirs:
+ continue
+
+ seen_dirs.add(path_dir)
+
+ if os.path.abspath(path_dir) == real_cwd:
+ path_dir = cwd
+
+ candidate = os.path.join(path_dir, executable)
+
+ if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
+ match = candidate
+ break
+
+ if not match and required:
+ message = 'Required program "%s" not found.' % executable
+
+ if required != 'warning':
+ raise ApplicationError(message)
+
+ display.warning(message)
+
+ return match
+
+
+def find_python(version: str, path: t.Optional[str] = None, required: bool = True) -> t.Optional[str]:
+ """
+ Find and return the full path to the specified Python version.
+    If required, an exception will be raised if not found.
+ If not required, None will be returned if not found.
+ """
+ version_info = str_to_version(version)
+
+ if not path and version_info == sys.version_info[:len(version_info)]:
+ python_bin = sys.executable
+ else:
+ python_bin = find_executable('python%s' % version, path=path, required=required)
+
+ return python_bin
+
+
+@cache
+def get_ansible_version() -> str:
+ """Return the Ansible version."""
+ # ansible may not be in our sys.path
+ # avoids a symlink to release.py since ansible placement relative to ansible-test may change during delegation
+ load_module(os.path.join(ANSIBLE_LIB_ROOT, 'release.py'), 'ansible_release')
+
+ # noinspection PyUnresolvedReferences
+ from ansible_release import __version__ as ansible_version # pylint: disable=import-error
+
+ return ansible_version
+
+
+@cache
+def get_available_python_versions() -> dict[str, str]:
+ """Return a dictionary indicating which supported Python versions are available."""
+ return dict((version, path) for version, path in ((version, find_python(version, required=False)) for version in SUPPORTED_PYTHON_VERSIONS) if path)
+
+
+def raw_command(
+ cmd: c.Iterable[str],
+ capture: bool,
+ env: t.Optional[dict[str, str]] = None,
+ data: t.Optional[str] = None,
+ cwd: t.Optional[str] = None,
+ explain: bool = False,
+ stdin: t.Optional[t.Union[t.IO[bytes], int]] = None,
+ stdout: t.Optional[t.Union[t.IO[bytes], int]] = None,
+ interactive: bool = False,
+ output_stream: t.Optional[OutputStream] = None,
+ cmd_verbosity: int = 1,
+ str_errors: str = 'strict',
+ error_callback: t.Optional[c.Callable[[SubprocessError], None]] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run the specified command and return stdout and stderr as a tuple."""
+ output_stream = output_stream or OutputStream.AUTO
+
+ if capture and interactive:
+ raise InternalError('Cannot combine capture=True with interactive=True.')
+
+ if data and interactive:
+ raise InternalError('Cannot combine data with interactive=True.')
+
+ if stdin and interactive:
+ raise InternalError('Cannot combine stdin with interactive=True.')
+
+ if stdout and interactive:
+ raise InternalError('Cannot combine stdout with interactive=True.')
+
+ if stdin and data:
+ raise InternalError('Cannot combine stdin with data.')
+
+ if stdout and not capture:
+ raise InternalError('Redirection of stdout requires capture=True to avoid redirection of stderr to stdout.')
+
+ if output_stream != OutputStream.AUTO and capture:
+ raise InternalError(f'Cannot combine {output_stream=} with capture=True.')
+
+ if output_stream != OutputStream.AUTO and interactive:
+ raise InternalError(f'Cannot combine {output_stream=} with interactive=True.')
+
+ if not cwd:
+ cwd = os.getcwd()
+
+ if not env:
+ env = common_environment()
+
+ cmd = list(cmd)
+
+ escaped_cmd = shlex.join(cmd)
+
+ if capture:
+ description = 'Run'
+ elif interactive:
+ description = 'Interactive'
+ else:
+ description = 'Stream'
+
+ description += ' command'
+
+ with_types = []
+
+ if data:
+ with_types.append('data')
+
+ if stdin:
+ with_types.append('stdin')
+
+ if stdout:
+ with_types.append('stdout')
+
+ if with_types:
+ description += f' with {"/".join(with_types)}'
+
+ display.info(f'{description}: {escaped_cmd}', verbosity=cmd_verbosity, truncate=True)
+ display.info('Working directory: %s' % cwd, verbosity=2)
+
+ program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning')
+
+ if program:
+ display.info('Program found: %s' % program, verbosity=2)
+
+ for key in sorted(env.keys()):
+ display.info('%s=%s' % (key, env[key]), verbosity=2)
+
+ if explain:
+ return None, None
+
+ communicate = False
+
+ if stdin is not None:
+ data = None
+ elif data is not None:
+ stdin = subprocess.PIPE
+ communicate = True
+ elif interactive:
+ pass # allow the subprocess access to our stdin
+ else:
+ stdin = subprocess.DEVNULL
+
+ if not interactive:
+ # When not running interactively, send subprocess stdout/stderr through a pipe.
+ # This isolates the stdout/stderr of the subprocess from the current process, and also hides the current TTY from it, if any.
+ # This prevents subprocesses from sharing stdout/stderr with the current process or each other.
+ # Doing so allows subprocesses to safely make changes to their file handles, such as making them non-blocking (ssh does this).
+ # This also maintains consistency between local testing and CI systems, which typically do not provide a TTY.
+ # To maintain output ordering, a single pipe is used for both stdout/stderr when not capturing output unless the output stream is ORIGINAL.
+ stdout = stdout or subprocess.PIPE
+ stderr = subprocess.PIPE if capture or output_stream == OutputStream.ORIGINAL else subprocess.STDOUT
+ communicate = True
+ else:
+ stderr = None
+
+ start = time.time()
+ process = None
+
+ try:
+ try:
+ cmd_bytes = [to_bytes(arg) for arg in cmd]
+ env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items())
+ process = subprocess.Popen(cmd_bytes, env=env_bytes, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd) # pylint: disable=consider-using-with
+ except FileNotFoundError as ex:
+ raise ApplicationError('Required program "%s" not found.' % cmd[0]) from ex
+
+ if communicate:
+ data_bytes = to_optional_bytes(data)
+ stdout_bytes, stderr_bytes = communicate_with_process(process, data_bytes, stdout == subprocess.PIPE, stderr == subprocess.PIPE, capture=capture,
+ output_stream=output_stream)
+ stdout_text = to_optional_text(stdout_bytes, str_errors) or ''
+ stderr_text = to_optional_text(stderr_bytes, str_errors) or ''
+ else:
+ process.wait()
+ stdout_text, stderr_text = None, None
+ finally:
+ if process and process.returncode is None:
+ process.kill()
+ display.info('') # the process we're interrupting may have completed a partial line of output
+ display.notice('Killed command to avoid an orphaned child process during handling of an unexpected exception.')
+
+ status = process.returncode
+ runtime = time.time() - start
+
+ display.info('Command exited with status %s after %s seconds.' % (status, runtime), verbosity=4)
+
+ if status == 0:
+ return stdout_text, stderr_text
+
+ raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime, error_callback)
+
+
+def communicate_with_process(
+ process: subprocess.Popen,
+ stdin: t.Optional[bytes],
+ stdout: bool,
+ stderr: bool,
+ capture: bool,
+ output_stream: OutputStream,
+) -> tuple[bytes, bytes]:
+ """Communicate with the specified process, handling stdin/stdout/stderr as requested."""
+ threads: list[WrappedThread] = []
+ reader: t.Type[ReaderThread]
+
+ if capture:
+ reader = CaptureThread
+ else:
+ reader = OutputThread
+
+ if stdin is not None:
+ threads.append(WriterThread(process.stdin, stdin))
+
+ if stdout:
+ stdout_reader = reader(process.stdout, output_stream.get_buffer(sys.stdout.buffer))
+ threads.append(stdout_reader)
+ else:
+ stdout_reader = None
+
+ if stderr:
+ stderr_reader = reader(process.stderr, output_stream.get_buffer(sys.stderr.buffer))
+ threads.append(stderr_reader)
+ else:
+ stderr_reader = None
+
+ for thread in threads:
+ thread.start()
+
+ for thread in threads:
+ try:
+ thread.wait_for_result()
+ except Exception as ex: # pylint: disable=broad-except
+ display.error(str(ex))
+
+ if isinstance(stdout_reader, ReaderThread):
+ stdout_bytes = b''.join(stdout_reader.lines)
+ else:
+ stdout_bytes = b''
+
+ if isinstance(stderr_reader, ReaderThread):
+ stderr_bytes = b''.join(stderr_reader.lines)
+ else:
+ stderr_bytes = b''
+
+ process.wait()
+
+ return stdout_bytes, stderr_bytes
+
+
+class WriterThread(WrappedThread):
+ """Thread to write data to stdin of a subprocess."""
+ def __init__(self, handle: t.IO[bytes], data: bytes) -> None:
+ super().__init__(self._run)
+
+ self.handle = handle
+ self.data = data
+
+ def _run(self) -> None:
+ """Workload to run on a thread."""
+ try:
+ self.handle.write(self.data)
+ self.handle.flush()
+ finally:
+ self.handle.close()
+
+
+class ReaderThread(WrappedThread, metaclass=abc.ABCMeta):
+    """Thread to read output from a subprocess."""
+ def __init__(self, handle: t.IO[bytes], buffer: t.BinaryIO) -> None:
+ super().__init__(self._run)
+
+ self.handle = handle
+ self.buffer = buffer
+ self.lines: list[bytes] = []
+
+ @abc.abstractmethod
+ def _run(self) -> None:
+ """Workload to run on a thread."""
+
+
+class CaptureThread(ReaderThread):
+    """Thread to capture output from a subprocess into a list of lines."""
+ def _run(self) -> None:
+ """Workload to run on a thread."""
+ src = self.handle
+ dst = self.lines
+
+ try:
+ for line in src:
+ dst.append(line)
+ finally:
+ src.close()
+
+
+class OutputThread(ReaderThread):
+    """Thread to pass output from a subprocess through to the console."""
+ def _run(self) -> None:
+ """Workload to run on a thread."""
+ src = self.handle
+ dst = self.buffer
+
+ try:
+ for line in src:
+ dst.write(line)
+ dst.flush()
+ finally:
+ src.close()
+
+
+def common_environment() -> dict[str, str]:
+ """Common environment used for executing all programs."""
+ env = dict(
+ LC_ALL=CONFIGURED_LOCALE,
+ PATH=os.environ.get('PATH', os.path.defpath),
+ )
+
+ required = (
+ 'HOME',
+ )
+
+ optional = (
+ 'LD_LIBRARY_PATH',
+ 'SSH_AUTH_SOCK',
+        # macOS High Sierra Compatibility
+ # http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html
+ # Example configuration for macOS:
+ # export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
+ 'OBJC_DISABLE_INITIALIZE_FORK_SAFETY',
+ 'ANSIBLE_KEEP_REMOTE_FILES',
+        # macOS Homebrew Compatibility
+        # https://cryptography.io/en/latest/installation/#building-cryptography-on-macos
+        # This may also be required to install pyyaml with libyaml support when libyaml is installed in a non-standard location.
+ # Example configuration for brew on macOS:
+ # export LDFLAGS="-L$(brew --prefix openssl)/lib/ -L$(brew --prefix libyaml)/lib/"
+ # export CFLAGS="-I$(brew --prefix openssl)/include/ -I$(brew --prefix libyaml)/include/"
+ 'LDFLAGS',
+ 'CFLAGS',
+ )
+
+ # FreeBSD Compatibility
+ # This is required to include libyaml support in PyYAML.
+ # The header /usr/local/include/yaml.h isn't in the default include path for the compiler.
+ # It is included here so that tests can take advantage of it, rather than only ansible-test during managed pip installs.
+ # If CFLAGS has been set in the environment that value will take precedence due to being an optional var when calling pass_vars.
+ if os.path.exists('/etc/freebsd-update.conf'):
+ env.update(CFLAGS='-I/usr/local/include/')
+
+ env.update(pass_vars(required=required, optional=optional))
+
+ return env
+
+
+def report_locale(show_warning: bool) -> None:
+ """Report the configured locale and the locale warning, if applicable."""
+
+ display.info(f'Configured locale: {CONFIGURED_LOCALE}', verbosity=1)
+
+ if LOCALE_WARNING and show_warning:
+ display.warning(LOCALE_WARNING)
+
+
+def pass_vars(required: c.Collection[str], optional: c.Collection[str]) -> dict[str, str]:
+ """Return a filtered dictionary of environment variables based on the current environment."""
+ env = {}
+
+ for name in required:
+ if name not in os.environ:
+ raise MissingEnvironmentVariable(name)
+ env[name] = os.environ[name]
+
+ for name in optional:
+ if name not in os.environ:
+ continue
+ env[name] = os.environ[name]
+
+ return env
+
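+# Example usage (illustrative; actual values depend on the caller's environment):
+#
+#   pass_vars(required=('HOME',), optional=('SSH_AUTH_SOCK',))
+#   # -> {'HOME': '/root', 'SSH_AUTH_SOCK': '/tmp/agent.1'} when both are set
+#   # -> raises MissingEnvironmentVariable if HOME is not set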
+
+def verified_chmod(path: str, mode: int) -> None:
+ """Perform chmod on the specified path and then verify the permissions were applied."""
+ os.chmod(path, mode) # pylint: disable=ansible-bad-function
+
+ executable = any(mode & perm for perm in (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH))
+
+ if executable and not os.access(path, os.X_OK):
+        raise ApplicationError(f'Path "{path}" should be executable, but is not. Is the filesystem mounted with the "noexec" option?')
+
+
+def remove_tree(path: str) -> None:
+ """Remove the specified directory, silently continuing if the directory does not exist."""
+ try:
+ shutil.rmtree(to_bytes(path))
+ except FileNotFoundError:
+ pass
+
+
+def is_binary_file(path: str) -> bool:
+ """Return True if the specified file is a binary file, otherwise return False."""
+ assume_text = {
+ '.cfg',
+ '.conf',
+ '.crt',
+ '.cs',
+ '.css',
+ '.html',
+ '.ini',
+ '.j2',
+ '.js',
+ '.json',
+ '.md',
+ '.pem',
+ '.ps1',
+ '.psm1',
+ '.py',
+ '.rst',
+ '.sh',
+ '.txt',
+ '.xml',
+ '.yaml',
+ '.yml',
+ }
+
+ assume_binary = {
+ '.bin',
+ '.eot',
+ '.gz',
+ '.ico',
+ '.iso',
+ '.jpg',
+ '.otf',
+ '.p12',
+ '.png',
+ '.pyc',
+ '.rpm',
+ '.ttf',
+ '.woff',
+ '.woff2',
+ '.zip',
+ }
+
+ ext = os.path.splitext(path)[1]
+
+ if ext in assume_text:
+ return False
+
+ if ext in assume_binary:
+ return True
+
+ with open_binary_file(path) as path_fd:
+ return b'\0' in path_fd.read(4096)
+
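+# Example classification (hypothetical paths):
+#
+#   is_binary_file('roles/x/tasks/main.yml')  # -> False ('.yml' is in assume_text)
+#   is_binary_file('files/logo.png')          # -> True ('.png' is in assume_binary)
+#   is_binary_file('files/unknown.dat')       # unknown extension, so the first 4096 bytes are sniffed for a NUL byte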
+
+def generate_name(length: int = 8) -> str:
+ """Generate and return a random name."""
+ return ''.join(random.choice(string.ascii_letters + string.digits) for _idx in range(length))
+
+
+def generate_password() -> str:
+    """Generate and return a random password."""
+ chars = [
+ string.ascii_letters,
+ string.digits,
+ string.ascii_letters,
+ string.digits,
+ '-',
+ ] * 4
+
+ password = ''.join([random.choice(char) for char in chars[:-1]])
+
+ display.sensitive.add(password)
+
+ return password
+
+
+class Display:
+ """Manages color console output."""
+ clear = '\033[0m'
+ red = '\033[31m'
+ green = '\033[32m'
+ yellow = '\033[33m'
+ blue = '\033[34m'
+ purple = '\033[35m'
+ cyan = '\033[36m'
+
+ verbosity_colors = {
+ 0: None,
+ 1: green,
+ 2: blue,
+ 3: cyan,
+ }
+
+ def __init__(self) -> None:
+ self.verbosity = 0
+ self.color = sys.stdout.isatty()
+ self.warnings: list[str] = []
+ self.warnings_unique: set[str] = set()
+ self.fd = sys.stderr # default to stderr until config is initialized to avoid early messages going to stdout
+ self.rows = 0
+ self.columns = 0
+ self.truncate = 0
+ self.redact = True
+ self.sensitive: set[str] = set()
+
+ if os.isatty(0):
+ self.rows, self.columns = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[:2]
+
+ def __warning(self, message: str) -> None:
+ """Internal implementation for displaying a warning message."""
+ self.print_message('WARNING: %s' % message, color=self.purple)
+
+ def review_warnings(self) -> None:
+ """Review all warnings which previously occurred."""
+ if not self.warnings:
+ return
+
+ self.__warning('Reviewing previous %d warning(s):' % len(self.warnings))
+
+ for warning in self.warnings:
+ self.__warning(warning)
+
+ def warning(self, message: str, unique: bool = False, verbosity: int = 0) -> None:
+ """Display a warning level message."""
+ if verbosity > self.verbosity:
+ return
+
+ if unique:
+ if message in self.warnings_unique:
+ return
+
+ self.warnings_unique.add(message)
+
+ self.__warning(message)
+ self.warnings.append(message)
+
+ def notice(self, message: str) -> None:
+ """Display a notice level message."""
+ self.print_message('NOTICE: %s' % message, color=self.purple)
+
+ def error(self, message: str) -> None:
+ """Display an error level message."""
+ self.print_message('ERROR: %s' % message, color=self.red)
+
+ def fatal(self, message: str) -> None:
+ """Display a fatal level message."""
+ self.print_message('FATAL: %s' % message, color=self.red, stderr=True)
+
+ def info(self, message: str, verbosity: int = 0, truncate: bool = False) -> None:
+ """Display an info level message."""
+ if self.verbosity >= verbosity:
+ color = self.verbosity_colors.get(verbosity, self.yellow)
+ self.print_message(message, color=color, truncate=truncate)
+
+ def print_message( # pylint: disable=locally-disabled, invalid-name
+ self,
+ message: str,
+ color: t.Optional[str] = None,
+ stderr: bool = False,
+ truncate: bool = False,
+ ) -> None:
+ """Display a message."""
+ if self.redact and self.sensitive:
+ for item in self.sensitive:
+ if not item:
+ continue
+
+ message = message.replace(item, '*' * len(item))
+
+ if truncate:
+ if len(message) > self.truncate > 5:
+ message = message[:self.truncate - 5] + ' ...'
+
+ if color and self.color:
+ # convert color resets in message to desired color
+ message = message.replace(self.clear, color)
+ message = '%s%s%s' % (color, message, self.clear)
+
+ fd = sys.stderr if stderr else self.fd
+
+ print(message, file=fd)
+ fd.flush()
+
+
+class InternalError(Exception):
+ """An unhandled internal error indicating a bug in the code."""
+ def __init__(self, message: str) -> None:
+ super().__init__(f'An internal error has occurred in ansible-test: {message}')
+
+
+class ApplicationError(Exception):
+ """General application error."""
+
+
+class ApplicationWarning(Exception):
+ """General application warning which interrupts normal program flow."""
+
+
+class SubprocessError(ApplicationError):
+ """Error resulting from failed subprocess execution."""
+ def __init__(
+ self,
+ cmd: list[str],
+ status: int = 0,
+ stdout: t.Optional[str] = None,
+ stderr: t.Optional[str] = None,
+ runtime: t.Optional[float] = None,
+ error_callback: t.Optional[c.Callable[[SubprocessError], None]] = None,
+ ) -> None:
+ message = 'Command "%s" returned exit status %s.\n' % (shlex.join(cmd), status)
+
+ if stderr:
+ message += '>>> Standard Error\n'
+ message += '%s%s\n' % (stderr.strip(), Display.clear)
+
+ if stdout:
+ message += '>>> Standard Output\n'
+ message += '%s%s\n' % (stdout.strip(), Display.clear)
+
+ self.cmd = cmd
+ self.message = message
+ self.status = status
+ self.stdout = stdout
+ self.stderr = stderr
+ self.runtime = runtime
+
+ if error_callback:
+ error_callback(self)
+
+ self.message = self.message.strip()
+
+ super().__init__(self.message)
+
+
+class MissingEnvironmentVariable(ApplicationError):
+ """Error caused by missing environment variable."""
+ def __init__(self, name: str) -> None:
+ super().__init__('Missing environment variable: %s' % name)
+
+ self.name = name
+
+
+class HostConnectionError(ApplicationError):
+ """
+ Raised when the initial connection during host profile setup has failed and all retries have been exhausted.
+ Raised by provisioning code when one or more provisioning threads raise this exception.
+ Also raised when an SSH connection fails for the shell command.
+ """
+    def __init__(self, message: str, callback: t.Optional[t.Callable[[], None]] = None) -> None:
+ super().__init__(message)
+
+ self._callback = callback
+
+ def run_callback(self) -> None:
+ """Run the error callback, if any."""
+ if self._callback:
+ self._callback()
+
+
+def retry(func: t.Callable[..., TValue], ex_type: t.Type[BaseException] = SubprocessError, sleep: int = 10, attempts: int = 10, warn: bool = True) -> TValue:
+ """Retry the specified function on failure."""
+ for dummy in range(1, attempts):
+ try:
+ return func()
+ except ex_type as ex:
+ if warn:
+ display.warning(str(ex))
+
+ time.sleep(sleep)
+
+ return func()
+
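+# Example usage (illustrative command): with the defaults the callable is attempted up to
+# 10 times in total, sleeping 10 seconds after each failure; the final attempt runs outside
+# the loop so its exception propagates to the caller.
+#
+#   retry(lambda: raw_command(['curl', '--silent', 'https://example.com/'], capture=True))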
+
+def parse_to_list_of_dict(pattern: str, value: str) -> list[dict[str, str]]:
+ """Parse lines from the given value using the specified pattern and return the extracted list of key/value pair dictionaries."""
+ matched = []
+ unmatched = []
+
+ for line in value.splitlines():
+ match = re.search(pattern, line)
+
+ if match:
+ matched.append(match.groupdict())
+ else:
+ unmatched.append(line)
+
+ if unmatched:
+ raise Exception('Pattern "%s" did not match values:\n%s' % (pattern, '\n'.join(unmatched)))
+
+ return matched
+
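+# Example (hypothetical pattern and input):
+#
+#   parse_to_list_of_dict(r'^(?P<key>\w+)=(?P<value>\w+)$', 'a=1\nb=2')
+#   # -> [{'key': 'a', 'value': '1'}, {'key': 'b', 'value': '2'}]
+#   # any line the pattern does not match raises an exception listing the unmatched lines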
+
+def get_subclasses(class_type: t.Type[C]) -> list[t.Type[C]]:
+ """Returns a list of types that are concrete subclasses of the given type."""
+ subclasses: set[t.Type[C]] = set()
+ queue: list[t.Type[C]] = [class_type]
+
+ while queue:
+ parent = queue.pop()
+
+ for child in parent.__subclasses__():
+ if child not in subclasses:
+ if not inspect.isabstract(child):
+ subclasses.add(child)
+ queue.append(child)
+
+ return sorted(subclasses, key=lambda sc: sc.__name__)
+
+
+def is_subdir(candidate_path: str, path: str) -> bool:
+    """Returns True if candidate_path is path or a subdirectory of path."""
+ if not path.endswith(os.path.sep):
+ path += os.path.sep
+
+ if not candidate_path.endswith(os.path.sep):
+ candidate_path += os.path.sep
+
+ return candidate_path.startswith(path)
+
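+# Example behavior (hypothetical paths): trailing separators are appended before comparison
+# so prefix matching cannot confuse sibling directories.
+#
+#   is_subdir('/tmp/foo/bar', '/tmp/foo')  # -> True
+#   is_subdir('/tmp/foobar', '/tmp/foo')   # -> False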
+
+def paths_to_dirs(paths: list[str]) -> list[str]:
+ """Returns a list of directories extracted from the given list of paths."""
+ dir_names = set()
+
+ for path in paths:
+ while True:
+ path = os.path.dirname(path)
+
+ if not path or path == os.path.sep:
+ break
+
+ dir_names.add(path + os.path.sep)
+
+ return sorted(dir_names)
+
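+# Example (hypothetical input): every ancestor directory is returned with a trailing separator.
+#
+#   paths_to_dirs(['a/b/c.py'])  # -> ['a/', 'a/b/']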
+
+def str_to_version(version: str) -> tuple[int, ...]:
+ """Return a version tuple from a version string."""
+ return tuple(int(n) for n in version.split('.'))
+
+
+def version_to_str(version: tuple[int, ...]) -> str:
+ """Return a version string from a version tuple."""
+ return '.'.join(str(n) for n in version)
+
+
+def sorted_versions(versions: list[str]) -> list[str]:
+ """Return a sorted copy of the given list of versions."""
+ return [version_to_str(version) for version in sorted(str_to_version(version) for version in versions)]
+
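+# Example (illustrative): sorting is numeric rather than lexicographic, so '3.10' sorts after '3.9'.
+#
+#   str_to_version('2.14.3')          # -> (2, 14, 3)
+#   sorted_versions(['3.10', '3.9'])  # -> ['3.9', '3.10']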
+
+def import_plugins(directory: str, root: t.Optional[str] = None) -> None:
+ """
+ Import plugins from the given directory relative to the given root.
+ If the root is not provided, the 'lib' directory for the test runner will be used.
+ """
+ if root is None:
+ root = os.path.dirname(__file__)
+
+ path = os.path.join(root, directory)
+ package = __name__.rsplit('.', 1)[0]
+ prefix = '%s.%s.' % (package, directory.replace(os.path.sep, '.'))
+
+ for (_module_loader, name, _ispkg) in pkgutil.iter_modules([path], prefix=prefix):
+ module_path = os.path.join(root, name[len(package) + 1:].replace('.', os.path.sep) + '.py')
+ load_module(module_path, name)
+
+
+def load_plugins(base_type: t.Type[C], database: dict[str, t.Type[C]]) -> None:
+ """
+ Load plugins of the specified type and track them in the specified database.
+ Only plugins which have already been imported will be loaded.
+ """
+ plugins: dict[str, t.Type[C]] = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type))
+
+ for plugin in plugins:
+ database[plugin] = plugins[plugin]
+
+
+def load_module(path: str, name: str) -> None:
+ """Load a Python module using the given name and path."""
+ if name in sys.modules:
+ return
+
+ spec = importlib.util.spec_from_file_location(name, path)
+ module = importlib.util.module_from_spec(spec)
+ sys.modules[name] = module
+ spec.loader.exec_module(module)
+
+
+def sanitize_host_name(name: str) -> str:
+ """Return a sanitized version of the given name, suitable for use as a hostname."""
+ return re.sub('[^A-Za-z0-9]+', '-', name)[:63].strip('-')
+
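+# Example (hypothetical name): runs of non-alphanumeric characters collapse to single hyphens
+# and the result is capped at 63 characters, the maximum length of a DNS label.
+#
+#   sanitize_host_name('ubuntu 22.04 (x86_64)')  # -> 'ubuntu-22-04-x86-64'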
+
+def get_generic_type(base_type: t.Type, generic_base_type: t.Type[TValue]) -> t.Optional[t.Type[TValue]]:
+ """Return the generic type arg derived from the generic_base_type type that is associated with the base_type type, if any, otherwise return None."""
+ # noinspection PyUnresolvedReferences
+ type_arg = t.get_args(base_type.__orig_bases__[0])[0]
+ return None if isinstance(type_arg, generic_base_type) else type_arg
+
+
+def get_type_associations(base_type: t.Type[TBase], generic_base_type: t.Type[TValue]) -> list[tuple[t.Type[TValue], t.Type[TBase]]]:
+ """Create and return a list of tuples associating generic_base_type derived types with a corresponding base_type derived type."""
+ return [item for item in [(get_generic_type(sc_type, generic_base_type), sc_type) for sc_type in get_subclasses(base_type)] if item[1]]
+
+
+def get_type_map(base_type: t.Type[TBase], generic_base_type: t.Type[TValue]) -> dict[t.Type[TValue], t.Type[TBase]]:
+ """Create and return a mapping of generic_base_type derived types to base_type derived types."""
+ return {item[0]: item[1] for item in get_type_associations(base_type, generic_base_type)}
+
+
+def verify_sys_executable(path: str) -> t.Optional[str]:
+ """Verify that the given path references the current Python interpreter. If not, return the expected path, otherwise return None."""
+ if path == sys.executable:
+ return None
+
+ if os.path.realpath(path) == os.path.realpath(sys.executable):
+ return None
+
+ expected_executable = raw_command([path, '-c', 'import sys; print(sys.executable)'], capture=True)[0]
+
+ if expected_executable == sys.executable:
+ return None
+
+ return expected_executable
+
+
+def type_guard(sequence: c.Sequence[t.Any], guard_type: t.Type[C]) -> TypeGuard[c.Sequence[C]]:
+ """
+ Raises an exception if any item in the given sequence does not match the specified guard type.
+ Use with assert so that type checkers are aware of the type guard.
+ """
+ invalid_types = set(type(item) for item in sequence if not isinstance(item, guard_type))
+
+ if not invalid_types:
+ return True
+
+ invalid_type_names = sorted(str(item) for item in invalid_types)
+
+ raise Exception(f'Sequence required to contain only {guard_type} includes: {", ".join(invalid_type_names)}')
+
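+# Example usage (illustrative), paired with assert so type checkers narrow the sequence type:
+#
+#   items: list[object] = ['a', 'b']
+#   assert type_guard(items, str)
+#   # type checkers now treat items as a Sequence[str]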
+
+display = Display() # pylint: disable=locally-disabled, invalid-name
diff --git a/test/lib/ansible_test/_internal/util_common.py b/test/lib/ansible_test/_internal/util_common.py
new file mode 100644
index 0000000..1dfc7f3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/util_common.py
@@ -0,0 +1,486 @@
+"""Common utility code that depends on CommonConfig."""
+from __future__ import annotations
+
+import atexit
+import collections.abc as c
+import contextlib
+import json
+import os
+import re
+import shlex
+import sys
+import tempfile
+import textwrap
+import typing as t
+
+from .constants import (
+ ANSIBLE_BIN_SYMLINK_MAP,
+)
+
+from .encoding import (
+ to_bytes,
+)
+
+from .util import (
+ cache,
+ display,
+ get_ansible_version,
+ remove_tree,
+ MODE_DIRECTORY,
+ MODE_FILE_EXECUTE,
+ MODE_FILE,
+ OutputStream,
+ PYTHON_PATHS,
+ raw_command,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_TEST_TARGET_ROOT,
+ ANSIBLE_TEST_TARGET_TOOLS_ROOT,
+ ApplicationError,
+ SubprocessError,
+ generate_name,
+ verified_chmod,
+)
+
+from .io import (
+ make_dirs,
+ read_text_file,
+ write_text_file,
+ write_json_file,
+)
+
+from .data import (
+ data_context,
+)
+
+from .provider.layout import (
+ LayoutMessages,
+)
+
+from .host_configs import (
+ PythonConfig,
+ VirtualPythonConfig,
+)
+
+CHECK_YAML_VERSIONS: dict[str, t.Any] = {}
+
+
+class ShellScriptTemplate:
+ """A simple substitution template for shell scripts."""
+ def __init__(self, template: str) -> None:
+ self.template = template
+
+ def substitute(self, **kwargs: t.Union[str, list[str]]) -> str:
+ """Return a string templated with the given arguments."""
+ kvp = dict((k, self.quote(v)) for k, v in kwargs.items())
+ pattern = re.compile(r'#{(?P<name>[^}]+)}')
+ value = pattern.sub(lambda match: kvp[match.group('name')], self.template)
+ return value
+
+ @staticmethod
+ def quote(value: t.Union[str, list[str]]) -> str:
+ """Return a shell quoted version of the given value."""
+ if isinstance(value, list):
+ return shlex.quote(' '.join(value))
+
+ return shlex.quote(value)
+
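+# Example (hypothetical template): #{name} placeholders are replaced with shell-quoted values.
+#
+#   template = ShellScriptTemplate('echo #{message}')
+#   template.substitute(message='hello world')  # -> "echo 'hello world'"
+#   template.substitute(message=['a', 'b'])     # -> "echo 'a b'"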
+
+class ResultType:
+ """Test result type."""
+ BOT: ResultType = None
+ COVERAGE: ResultType = None
+ DATA: ResultType = None
+ JUNIT: ResultType = None
+ LOGS: ResultType = None
+ REPORTS: ResultType = None
+ TMP: ResultType = None
+
+ @staticmethod
+ def _populate() -> None:
+ ResultType.BOT = ResultType('bot')
+ ResultType.COVERAGE = ResultType('coverage')
+ ResultType.DATA = ResultType('data')
+ ResultType.JUNIT = ResultType('junit')
+ ResultType.LOGS = ResultType('logs')
+ ResultType.REPORTS = ResultType('reports')
+ ResultType.TMP = ResultType('.tmp')
+
+ def __init__(self, name: str) -> None:
+ self.name = name
+
+ @property
+ def relative_path(self) -> str:
+ """The content relative path to the results."""
+ return os.path.join(data_context().content.results_path, self.name)
+
+ @property
+ def path(self) -> str:
+ """The absolute path to the results."""
+ return os.path.join(data_context().content.root, self.relative_path)
+
+ def __str__(self) -> str:
+ return self.name
+
+
+# noinspection PyProtectedMember
+ResultType._populate() # pylint: disable=protected-access
+
+
+class CommonConfig:
+ """Configuration common to all commands."""
+ def __init__(self, args: t.Any, command: str) -> None:
+ self.command = command
+ self.interactive = False
+ self.check_layout = True
+ self.success: t.Optional[bool] = None
+
+ self.color: bool = args.color
+ self.explain: bool = args.explain
+ self.verbosity: int = args.verbosity
+ self.debug: bool = args.debug
+ self.truncate: int = args.truncate
+ self.redact: bool = args.redact
+
+ self.display_stderr: bool = False
+
+ self.session_name = generate_name()
+
+ self.cache: dict[str, t.Any] = {}
+
+ def get_ansible_config(self) -> str:
+ """Return the path to the Ansible config for the given config."""
+ return os.path.join(ANSIBLE_TEST_DATA_ROOT, 'ansible.cfg')
+
+
+def get_docs_url(url: str) -> str:
+ """
+ Return the given docs.ansible.com URL updated to match the running ansible-test version, if it is not a pre-release version.
+    The URL should be in the form: https://docs.ansible.com/ansible-core/devel/path/to/doc.html
+ Where 'devel' will be replaced with the current version, unless it is a pre-release version.
+ When run under a pre-release version, the URL will remain unchanged.
+ This serves to provide a fallback URL for pre-release versions.
+ It also makes searching the source for docs links easier, since a full URL is provided to this function.
+ """
+ url_prefix = 'https://docs.ansible.com/ansible-core/devel/'
+
+ if not url.startswith(url_prefix):
+ raise ValueError(f'URL "{url}" does not start with: {url_prefix}')
+
+ ansible_version = get_ansible_version()
+
+ if re.search(r'^[0-9.]+$', ansible_version):
+ url_version = '.'.join(ansible_version.split('.')[:2])
+ new_prefix = f'https://docs.ansible.com/ansible-core/{url_version}/'
+
+ url = url.replace(url_prefix, new_prefix)
+
+ return url
+
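+# Example (illustrative, assuming ansible-test reports version 2.14.3):
+#
+#   get_docs_url('https://docs.ansible.com/ansible-core/devel/dev_guide/testing.html')
+#   # -> 'https://docs.ansible.com/ansible-core/2.14/dev_guide/testing.html'
+#   # a pre-release version such as 2.15.0.dev0 fails the version regex and leaves the URL unchanged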
+
+def create_result_directories(args: CommonConfig) -> None:
+ """Create result directories."""
+ if args.explain:
+ return
+
+ make_dirs(ResultType.COVERAGE.path)
+ make_dirs(ResultType.DATA.path)
+
+
+def handle_layout_messages(messages: t.Optional[LayoutMessages]) -> None:
+ """Display the given layout messages."""
+ if not messages:
+ return
+
+ for message in messages.info:
+ display.info(message, verbosity=1)
+
+ for message in messages.warning:
+ display.warning(message)
+
+ if messages.error:
+ raise ApplicationError('\n'.join(messages.error))
+
+
+def process_scoped_temporary_file(args: CommonConfig, prefix: t.Optional[str] = 'ansible-test-', suffix: t.Optional[str] = None) -> str:
+ """Return the path to a temporary file that will be automatically removed when the process exits."""
+ if args.explain:
+ path = os.path.join(tempfile.gettempdir(), f'{prefix or tempfile.gettempprefix()}{generate_name()}{suffix or ""}')
+ else:
+ temp_fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
+ os.close(temp_fd)
+ atexit.register(lambda: os.remove(path))
+
+ return path
+
+
+def process_scoped_temporary_directory(args: CommonConfig, prefix: t.Optional[str] = 'ansible-test-', suffix: t.Optional[str] = None) -> str:
+ """Return the path to a temporary directory that will be automatically removed when the process exits."""
+ if args.explain:
+ path = os.path.join(tempfile.gettempdir(), f'{prefix or tempfile.gettempprefix()}{generate_name()}{suffix or ""}')
+ else:
+ path = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
+ atexit.register(lambda: remove_tree(path))
+
+ return path
+
+
+@contextlib.contextmanager
+def named_temporary_file(args: CommonConfig, prefix: str, suffix: str, directory: t.Optional[str], content: str) -> c.Iterator[str]:
+ """Context manager for a named temporary file."""
+ if args.explain:
+ yield os.path.join(directory or '/tmp', '%stemp%s' % (prefix, suffix))
+ else:
+ with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory) as tempfile_fd:
+ tempfile_fd.write(to_bytes(content))
+ tempfile_fd.flush()
+
+ yield tempfile_fd.name
+
+
+def write_json_test_results(category: ResultType,
+ name: str,
+ content: t.Union[list[t.Any], dict[str, t.Any]],
+ formatted: bool = True,
+ encoder: t.Optional[t.Type[json.JSONEncoder]] = None,
+ ) -> None:
+ """Write the given json content to the specified test results path, creating directories as needed."""
+ path = os.path.join(category.path, name)
+ write_json_file(path, content, create_directories=True, formatted=formatted, encoder=encoder)
+
+
+def write_text_test_results(category: ResultType, name: str, content: str) -> None:
+ """Write the given text content to the specified test results path, creating directories as needed."""
+ path = os.path.join(category.path, name)
+ write_text_file(path, content, create_directories=True)
+
+
+@cache
+def get_injector_path() -> str:
+ """Return the path to a directory which contains a `python.py` executable and associated injector scripts."""
+ injector_path = tempfile.mkdtemp(prefix='ansible-test-', suffix='-injector', dir='/tmp')
+
+ display.info(f'Initializing "{injector_path}" as the temporary injector directory.', verbosity=1)
+
+ injector_names = sorted(list(ANSIBLE_BIN_SYMLINK_MAP) + [
+ 'importer.py',
+ 'pytest',
+ ])
+
+ scripts = (
+ ('python.py', '/usr/bin/env python', MODE_FILE_EXECUTE),
+ ('virtualenv.sh', '/usr/bin/env bash', MODE_FILE),
+ )
+
+ source_path = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'injector')
+
+ for name in injector_names:
+ os.symlink('python.py', os.path.join(injector_path, name))
+
+ for name, shebang, mode in scripts:
+ src = os.path.join(source_path, name)
+ dst = os.path.join(injector_path, name)
+
+ script = read_text_file(src)
+ script = set_shebang(script, shebang)
+
+ write_text_file(dst, script)
+ verified_chmod(dst, mode)
+
+ verified_chmod(injector_path, MODE_DIRECTORY)
+
+ def cleanup_injector() -> None:
+ """Remove the temporary injector directory."""
+ remove_tree(injector_path)
+
+ atexit.register(cleanup_injector)
+
+ return injector_path
+
+
+def set_shebang(script: str, executable: str) -> str:
+ """Return the given script with the specified executable used for the shebang."""
+ prefix = '#!'
+ shebang = prefix + executable
+
+ overwrite = (
+ prefix,
+ '# auto-shebang',
+ '# shellcheck shell=',
+ )
+
+ lines = script.splitlines()
+
+ if any(lines[0].startswith(value) for value in overwrite):
+ lines[0] = shebang
+ else:
+ lines.insert(0, shebang)
+
+ script = '\n'.join(lines)
+
+ return script
+
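+# Example (hypothetical scripts): an existing shebang, or a recognized placeholder comment,
+# on the first line is replaced; otherwise the shebang is prepended.
+#
+#   set_shebang('#!/bin/sh\necho hi', '/usr/bin/env bash')  # -> '#!/usr/bin/env bash\necho hi'
+#   set_shebang('echo hi', '/usr/bin/env bash')             # -> '#!/usr/bin/env bash\necho hi'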
+
+def get_python_path(interpreter: str) -> str:
+ """Return the path to a directory which contains a `python` executable that runs the specified interpreter."""
+ python_path = PYTHON_PATHS.get(interpreter)
+
+ if python_path:
+ return python_path
+
+ prefix = 'python-'
+ suffix = '-ansible'
+
+ root_temp_dir = '/tmp'
+
+ python_path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
+ injected_interpreter = os.path.join(python_path, 'python')
+
+ # A symlink is faster than the execv wrapper, but isn't guaranteed to provide the correct result.
+ # There are several scenarios known not to work with symlinks:
+ #
+ # - A virtual environment where the target is a symlink to another directory.
+ # - A pyenv environment where the target is a shell script that changes behavior based on the program name.
+ #
+ # To avoid issues for these and other scenarios, only an exec wrapper is used.
+
+    display.info('Injecting "%s" as an execv wrapper for the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)
+
+ create_interpreter_wrapper(interpreter, injected_interpreter)
+
+ verified_chmod(python_path, MODE_DIRECTORY)
+
+ if not PYTHON_PATHS:
+ atexit.register(cleanup_python_paths)
+
+ PYTHON_PATHS[interpreter] = python_path
+
+ return python_path
+
+
+def create_temp_dir(prefix: t.Optional[str] = None, suffix: t.Optional[str] = None, base_dir: t.Optional[str] = None) -> str:
+ """Create a temporary directory that persists until the current process exits."""
+ temp_path = tempfile.mkdtemp(prefix=prefix or 'tmp', suffix=suffix or '', dir=base_dir)
+ atexit.register(remove_tree, temp_path)
+ return temp_path
+
+
+def create_interpreter_wrapper(interpreter: str, injected_interpreter: str) -> None:
+ """Create a wrapper for the given Python interpreter at the specified path."""
+ # sys.executable is used for the shebang to guarantee it is a binary instead of a script
+ # injected_interpreter could be a script from the system or our own wrapper created for the --venv option
+ shebang_interpreter = sys.executable
+
+ code = textwrap.dedent('''
+ #!%s
+
+ from __future__ import absolute_import
+
+ from os import execv
+ from sys import argv
+
+ python = '%s'
+
+ execv(python, [python] + argv[1:])
+ ''' % (shebang_interpreter, interpreter)).lstrip()
+
+ write_text_file(injected_interpreter, code)
+
+ verified_chmod(injected_interpreter, MODE_FILE_EXECUTE)
+
+
+def cleanup_python_paths() -> None:
+ """Clean up all temporary python directories."""
+ for path in sorted(PYTHON_PATHS.values()):
+ display.info('Cleaning up temporary python directory: %s' % path, verbosity=2)
+ remove_tree(path)
+
+
+def intercept_python(
+ args: CommonConfig,
+ python: PythonConfig,
+ cmd: list[str],
+ env: dict[str, str],
+ capture: bool,
+ data: t.Optional[str] = None,
+ cwd: t.Optional[str] = None,
+ always: bool = False,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """
+ Run a command while intercepting invocations of Python to control the version used.
+ If the specified Python is an ansible-test managed virtual environment, it will be added to PATH to activate it.
+ Otherwise a temporary directory will be created to ensure the correct Python can be found in PATH.
+ """
+ env = env.copy()
+ cmd = list(cmd)
+ inject_path = get_injector_path()
+
+ # make sure scripts (including injector.py) find the correct Python interpreter
+ if isinstance(python, VirtualPythonConfig):
+ python_path = os.path.dirname(python.path)
+ else:
+ python_path = get_python_path(python.path)
+
+ env['PATH'] = os.path.pathsep.join([inject_path, python_path, env['PATH']])
+ env['ANSIBLE_TEST_PYTHON_VERSION'] = python.version
+ env['ANSIBLE_TEST_PYTHON_INTERPRETER'] = python.path
+
+ return run_command(args, cmd, capture=capture, env=env, data=data, cwd=cwd, always=always)
+
+
+def run_command(
+ args: CommonConfig,
+ cmd: c.Iterable[str],
+ capture: bool,
+ env: t.Optional[dict[str, str]] = None,
+ data: t.Optional[str] = None,
+ cwd: t.Optional[str] = None,
+ always: bool = False,
+ stdin: t.Optional[t.IO[bytes]] = None,
+ stdout: t.Optional[t.IO[bytes]] = None,
+ interactive: bool = False,
+ output_stream: t.Optional[OutputStream] = None,
+ cmd_verbosity: int = 1,
+ str_errors: str = 'strict',
+ error_callback: t.Optional[c.Callable[[SubprocessError], None]] = None,
+) -> tuple[t.Optional[str], t.Optional[str]]:
+ """Run the specified command and return stdout and stderr as a tuple."""
+ explain = args.explain and not always
+ return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout, interactive=interactive,
+ output_stream=output_stream, cmd_verbosity=cmd_verbosity, str_errors=str_errors, error_callback=error_callback)
+
+
+def yamlcheck(python: PythonConfig) -> t.Optional[bool]:
+ """Return True if PyYAML has libyaml support, False if it does not and None if it was not found."""
+ result = json.loads(raw_command([python.path, os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'yamlcheck.py')], capture=True)[0])
+
+ if not result['yaml']:
+ return None
+
+ return result['cloader']
+
+
+def check_pyyaml(python: PythonConfig, required: bool = True, quiet: bool = False) -> t.Optional[bool]:
+ """
+ Return True if PyYAML has libyaml support, False if it does not and None if it was not found.
+ The result is cached if True or required.
+ """
+ try:
+ return CHECK_YAML_VERSIONS[python.path]
+ except KeyError:
+ pass
+
+ state = yamlcheck(python)
+
+ if state is not None or required:
+ # results are cached only if pyyaml is required or present
+ # it is assumed that tests will not uninstall/re-install pyyaml -- if they do, those changes will go undetected
+ CHECK_YAML_VERSIONS[python.path] = state
+
+ if not quiet:
+ if state is None:
+ if required:
+ display.warning('PyYAML is not installed for interpreter: %s' % python.path)
+ elif not state:
+ display.warning('PyYAML will be slow due to installation without libyaml support for interpreter: %s' % python.path)
+
+ return state
diff --git a/test/lib/ansible_test/_internal/venv.py b/test/lib/ansible_test/_internal/venv.py
new file mode 100644
index 0000000..ec498ed
--- /dev/null
+++ b/test/lib/ansible_test/_internal/venv.py
@@ -0,0 +1,278 @@
+"""Virtual environment management."""
+from __future__ import annotations
+
+import collections.abc as c
+import json
+import os
+import pathlib
+import sys
+import typing as t
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .util import (
+ find_python,
+ SubprocessError,
+ get_available_python_versions,
+ ANSIBLE_TEST_TARGET_TOOLS_ROOT,
+ display,
+ remove_tree,
+ ApplicationError,
+ str_to_version,
+ raw_command,
+)
+
+from .util_common import (
+ run_command,
+ ResultType,
+)
+
+from .host_configs import (
+ VirtualPythonConfig,
+ PythonConfig,
+)
+
+from .python_requirements import (
+ collect_bootstrap,
+ run_pip,
+)
+
+
+def get_virtual_python(
+ args: EnvironmentConfig,
+ python: VirtualPythonConfig,
+) -> VirtualPythonConfig:
+ """Create a virtual environment for the given Python and return the path to its root."""
+ if python.system_site_packages:
+ suffix = '-ssp'
+ else:
+ suffix = ''
+
+ virtual_environment_path = os.path.join(ResultType.TMP.path, 'delegation', f'python{python.version}{suffix}')
+ virtual_environment_marker = os.path.join(virtual_environment_path, 'marker.txt')
+
+ virtual_environment_python = VirtualPythonConfig(
+ version=python.version,
+ path=os.path.join(virtual_environment_path, 'bin', 'python'),
+ system_site_packages=python.system_site_packages,
+ )
+
+ if os.path.exists(virtual_environment_marker):
+ display.info('Using existing Python %s virtual environment: %s' % (python.version, virtual_environment_path), verbosity=1)
+ else:
+ # a virtualenv without a marker is assumed to have been partially created
+ remove_tree(virtual_environment_path)
+
+ if not create_virtual_environment(args, python, virtual_environment_path, python.system_site_packages):
+ raise ApplicationError(f'Python {python.version} does not provide virtual environment support.')
+
+ commands = collect_bootstrap(virtual_environment_python)
+
+ run_pip(args, virtual_environment_python, commands, None) # get_virtual_python()
+
+ # touch the marker to keep track of when the virtualenv was last used
+ pathlib.Path(virtual_environment_marker).touch()
+
+ return virtual_environment_python
+
+
+def create_virtual_environment(args: EnvironmentConfig,
+ python: PythonConfig,
+ path: str,
+ system_site_packages: bool = False,
+ pip: bool = False,
+ ) -> bool:
+ """Create a virtual environment using venv or virtualenv for the requested Python version."""
+ if not os.path.exists(python.path):
+ # the requested python version could not be found
+ return False
+
+ if str_to_version(python.version) >= (3, 0):
+ # use the built-in 'venv' module on Python 3.x
+ # creating a virtual environment using 'venv' when running in a virtual environment created by 'virtualenv' results
+        # in a copy of the original virtual environment instead of creating a new one
+ # avoid this issue by only using "real" python interpreters to invoke 'venv'
+ for real_python in iterate_real_pythons(python.version):
+ if run_venv(args, real_python, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "venv": %s' % (python.version, path), verbosity=1)
+ return True
+
+ # something went wrong, most likely the package maintainer for the Python installation removed ensurepip
+ # which will prevent creation of a virtual environment without installation of other OS packages
+
+ # use the installed 'virtualenv' module on the Python requested version
+ if run_virtualenv(args, python.path, python.path, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "virtualenv": %s' % (python.version, path), verbosity=1)
+ return True
+
+ available_pythons = get_available_python_versions()
+
+ for available_python_version, available_python_interpreter in sorted(available_pythons.items()):
+ if available_python_interpreter == python.path:
+ # already attempted to use this interpreter
+ continue
+
+ virtualenv_version = get_virtualenv_version(args, available_python_interpreter)
+
+ if not virtualenv_version:
+ # virtualenv not available for this Python or we were unable to detect the version
+ continue
+
+        # try using 'virtualenv' from another Python to set up the desired version
+ if run_virtualenv(args, available_python_interpreter, python.path, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "virtualenv" on Python %s: %s' % (python.version, available_python_version, path),
+ verbosity=1)
+ return True
+
+ # no suitable 'virtualenv' available
+ return False
+
+
+def iterate_real_pythons(version: str) -> c.Iterable[str]:
+ """
+ Iterate through available real python interpreters of the requested version.
+ The current interpreter will be checked and then the path will be searched.
+ """
+ version_info = str_to_version(version)
+ current_python = None
+
+ if version_info == sys.version_info[:len(version_info)]:
+ current_python = sys.executable
+ real_prefix = get_python_real_prefix(current_python)
+
+ if real_prefix:
+ current_python = find_python(version, os.path.join(real_prefix, 'bin'))
+
+ if current_python:
+ yield current_python
+
+ path = os.environ.get('PATH', os.path.defpath)
+
+ if not path:
+ return
+
+ found_python = find_python(version, path)
+
+ if not found_python:
+ return
+
+ if found_python == current_python:
+ return
+
+ real_prefix = get_python_real_prefix(found_python)
+
+ if real_prefix:
+ found_python = find_python(version, os.path.join(real_prefix, 'bin'))
+
+ if found_python:
+ yield found_python
+
+
+def get_python_real_prefix(python_path: str) -> t.Optional[str]:
+ """
+ Return the real prefix of the specified interpreter or None if the interpreter is not a virtual environment created by 'virtualenv'.
+ """
+ cmd = [python_path, os.path.join(os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'virtualenvcheck.py'))]
+ check_result = json.loads(raw_command(cmd, capture=True)[0])
+ real_prefix = check_result['real_prefix']
+ return real_prefix
+
+
+def run_venv(args: EnvironmentConfig,
+ run_python: str,
+ system_site_packages: bool,
+ pip: bool,
+ path: str,
+ ) -> bool:
+ """Create a virtual environment using the 'venv' module. Not available on Python 2.x."""
+ cmd = [run_python, '-m', 'venv']
+
+ if system_site_packages:
+ cmd.append('--system-site-packages')
+
+ if not pip:
+ cmd.append('--without-pip')
+
+ cmd.append(path)
+
+ try:
+ run_command(args, cmd, capture=True)
+ except SubprocessError as ex:
+ remove_tree(path)
+
+ if args.verbosity > 1:
+ display.error(ex.message)
+
+ return False
+
+ return True
+
+
+def run_virtualenv(args: EnvironmentConfig,
+ run_python: str,
+ env_python: str,
+ system_site_packages: bool,
+ pip: bool,
+ path: str,
+ ) -> bool:
+ """Create a virtual environment using the 'virtualenv' module."""
+ # always specify which interpreter to use to guarantee the desired interpreter is provided
+ # otherwise virtualenv may select a different interpreter than the one running virtualenv
+ cmd = [run_python, '-m', 'virtualenv', '--python', env_python]
+
+ if system_site_packages:
+ cmd.append('--system-site-packages')
+
+ if not pip:
+ cmd.append('--no-pip')
+ # these options provide consistency with venv, which does not install them without pip
+ cmd.append('--no-setuptools')
+ cmd.append('--no-wheel')
+
+ cmd.append(path)
+
+ try:
+ run_command(args, cmd, capture=True)
+ except SubprocessError as ex:
+ remove_tree(path)
+
+ if args.verbosity > 1:
+ display.error(ex.message)
+
+ return False
+
+ return True
+
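+# Example of the resulting command (hypothetical interpreter paths):
+#
+#   run_virtualenv(args, '/usr/bin/python3.11', '/usr/bin/python3.9', False, False, '/tmp/venv')
+#   # runs: /usr/bin/python3.11 -m virtualenv --python /usr/bin/python3.9
+#   #       --no-pip --no-setuptools --no-wheel /tmp/venv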
+
+def get_virtualenv_version(args: EnvironmentConfig, python: str) -> t.Optional[tuple[int, ...]]:
+ """Get the virtualenv version for the given python interpreter, if available, otherwise return None."""
+ try:
+ cache = get_virtualenv_version.cache # type: ignore[attr-defined]
+ except AttributeError:
+ cache = get_virtualenv_version.cache = {} # type: ignore[attr-defined]
+
+ if python not in cache:
+ try:
+ stdout = run_command(args, [python, '-m', 'virtualenv', '--version'], capture=True)[0]
+ except SubprocessError as ex:
+ stdout = ''
+
+ if args.verbosity > 1:
+ display.error(ex.message)
+
+ version = None
+
+ if stdout:
+ # noinspection PyBroadException
+ try:
+ version = str_to_version(stdout.strip())
+ except Exception: # pylint: disable=broad-except
+ pass
+
+ cache[python] = version
+
+ version = cache[python]
+
+ return version
diff --git a/test/lib/ansible_test/_util/__init__.py b/test/lib/ansible_test/_util/__init__.py
new file mode 100644
index 0000000..527d413
--- /dev/null
+++ b/test/lib/ansible_test/_util/__init__.py
@@ -0,0 +1,2 @@
+# Empty __init__.py to allow importing of `ansible_test._util.target.common` under Python 2.x.
+# This allows the ansible-test entry point to report supported Python versions before exiting.
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.json
new file mode 100644
index 0000000..12bbe0d
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.json
@@ -0,0 +1,13 @@
+{
+ "all_targets": true,
+ "prefixes": [
+ "lib/ansible/modules/",
+ "lib/ansible/plugins/action/",
+ "plugins/modules/",
+ "plugins/action/"
+ ],
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.py
new file mode 100644
index 0000000..a319d1a
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/action-plugin-docs.py
@@ -0,0 +1,66 @@
+"""Test to verify action plugins have an associated module to provide documentation."""
+from __future__ import annotations
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ module_names = set()
+
+ module_prefixes = {
+ 'lib/ansible/modules/': True,
+ 'plugins/modules/': False,
+ }
+
+ action_prefixes = {
+ 'lib/ansible/plugins/action/': True,
+ 'plugins/action/': False,
+ }
+
+ for path in paths:
+ full_name = get_full_name(path, module_prefixes)
+
+ if full_name:
+ module_names.add(full_name)
+
+ for path in paths:
+ full_name = get_full_name(path, action_prefixes)
+
+ if full_name and full_name not in module_names:
+ print('%s: action plugin has no matching module to provide documentation' % path)
+
+
+def get_full_name(path, prefixes):
+ """Return the full name of the plugin at the given path by matching against the given path prefixes, or None if no match is found."""
+ for prefix, flat in prefixes.items():
+ if path.startswith(prefix):
+ relative_path = os.path.relpath(path, prefix)
+
+ if flat:
+ full_name = os.path.basename(relative_path)
+ else:
+ full_name = relative_path
+
+ full_name = os.path.splitext(full_name)[0]
+
+ name = os.path.basename(full_name)
+
+ if name == '__init__':
+ return None
+
+ if name.startswith('_'):
+ name = name[1:]
+
+ full_name = os.path.join(os.path.dirname(full_name), name).replace(os.path.sep, '.')
+
+ return full_name
+
+ return None
+
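+# Example (hypothetical paths, using the module_prefixes mapping from main): flat prefixes
+# yield a bare plugin name, collection prefixes keep subdirectories as a dotted name, and a
+# leading underscore (deprecation marker) is stripped from the base name.
+#
+#   get_full_name('lib/ansible/modules/_ping.py', module_prefixes)       # -> 'ping'
+#   get_full_name('plugins/modules/cloud/instance.py', module_prefixes)  # -> 'cloud.instance'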
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.json
new file mode 100644
index 0000000..7d19f10
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.json
@@ -0,0 +1,8 @@
+{
+ "intercept": true,
+ "prefixes": [
+ "changelogs/config.yaml",
+ "changelogs/fragments/"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.py
new file mode 100644
index 0000000..924e5af
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog.py
@@ -0,0 +1,60 @@
+"""Check changelog fragment naming, syntax, etc."""
+from __future__ import annotations
+
+import os
+import sys
+import subprocess
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ allowed_extensions = ('.yml', '.yaml')
+ config_path = 'changelogs/config.yaml'
+
+ # config must be detected independent of the file list since the file list only contains files under test (changed)
+ has_config = os.path.exists(config_path)
+ paths_to_check = []
+ for path in paths:
+ if path == config_path:
+ continue
+
+ if path.startswith('changelogs/fragments/.'):
+ if path in ('changelogs/fragments/.keep', 'changelogs/fragments/.gitkeep'):
+ continue
+
+ print('%s:%d:%d: file must not be a dotfile' % (path, 0, 0))
+ continue
+
+ ext = os.path.splitext(path)[1]
+
+ if ext not in allowed_extensions:
+ print('%s:%d:%d: extension must be one of: %s' % (path, 0, 0, ', '.join(allowed_extensions)))
+
+ paths_to_check.append(path)
+
+ if not has_config:
+ print('changelogs/config.yaml:0:0: config file does not exist')
+ return
+
+ if not paths_to_check:
+ return
+
+ cmd = [sys.executable, '-m', 'antsibull_changelog', 'lint'] + paths_to_check
+
+ # The sphinx module is a soft dependency for rstcheck, which is used by the changelog linter.
+ # If sphinx is found it will be loaded by rstcheck, which can affect the results of the test.
+ # To maintain consistency across environments, loading of sphinx is blocked, since any version (or no version) of sphinx may be present.
+ env = os.environ.copy()
+ env.update(PYTHONPATH='%s:%s' % (os.path.join(os.path.dirname(__file__), 'changelog'), env['PYTHONPATH']))
+
+ # ignore the return code, rely on the output instead
+ process = subprocess.run(cmd, stdin=subprocess.DEVNULL, capture_output=True, text=True, env=env, check=False)
+
+ sys.stdout.write(process.stdout)
+ sys.stderr.write(process.stderr)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog/sphinx.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog/sphinx.py
new file mode 100644
index 0000000..7eab0f5
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/changelog/sphinx.py
@@ -0,0 +1,4 @@
+"""Block the sphinx module from being loaded."""
+from __future__ import annotations
+
+raise ImportError('The sphinx module has been prevented from loading to maintain consistent test results.')
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.json
new file mode 100644
index 0000000..9835f9b
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.json
@@ -0,0 +1,14 @@
+{
+ "prefixes": [
+ "lib/ansible/modules/",
+ "lib/ansible/module_utils/",
+ "plugins/modules/",
+ "plugins/module_utils/",
+ "test/units/",
+ "tests/unit/"
+ ],
+ "files": [
+ "__init__.py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.py
new file mode 100644
index 0000000..01aef69
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/empty-init.py
@@ -0,0 +1,16 @@
+"""Require empty __init__.py files."""
+from __future__ import annotations
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ if os.path.getsize(path) > 0:
+ print('%s: empty __init__.py required' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.json
new file mode 100644
index 0000000..4ebce32
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "py2_compat": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.py
new file mode 100644
index 0000000..7b39c37
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/future-import-boilerplate.py
@@ -0,0 +1,46 @@
+"""Enforce proper usage of __future__ imports."""
+from __future__ import annotations
+
+import ast
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ lines = path_fd.read().splitlines()
+
+ missing = True
+ if not lines:
+ # Files are allowed to be empty of everything including boilerplate
+ missing = False
+
+ for text in lines:
+ if text in (b'from __future__ import (absolute_import, division, print_function)',
+ b'from __future__ import absolute_import, division, print_function'):
+ missing = False
+ break
+
+ if missing:
+ with open(path, encoding='utf-8') as file:
+ contents = file.read()
+
+ # noinspection PyBroadException
+ try:
+ node = ast.parse(contents)
+
+ # files consisting of only assignments have no need for future import boilerplate
+ # the only exception would be division during assignment, but we'll overlook that for simplicity
+ # the most likely case is that of a documentation only python file
+ if all(isinstance(statement, ast.Assign) for statement in node.body):
+ missing = False
+ except Exception: # pylint: disable=broad-except
+ pass # the compile sanity test will report this error
+
+ if missing:
+ print('%s: missing: from __future__ import (absolute_import, division, print_function)' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.json
new file mode 100644
index 0000000..db5c3c9
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.json
@@ -0,0 +1,4 @@
+{
+ "text": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.py
new file mode 100644
index 0000000..31f97ad
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/line-endings.py
@@ -0,0 +1,18 @@
+"""Require Unix line endings."""
+from __future__ import annotations
+
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ contents = path_fd.read()
+
+ if b'\r' in contents:
+ print('%s: use "\\n" for line endings instead of "\\r\\n"' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.json
new file mode 100644
index 0000000..4ebce32
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "py2_compat": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.py
new file mode 100644
index 0000000..8bdcfc9
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/metaclass-boilerplate.py
@@ -0,0 +1,44 @@
+"""Require __metaclass__ boilerplate for code that supports Python 2.x."""
+from __future__ import annotations
+
+import ast
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ lines = path_fd.read().splitlines()
+
+ missing = True
+ if not lines:
+ # Files are allowed to be empty of everything including boilerplate
+ missing = False
+
+ for text in lines:
+ if text == b'__metaclass__ = type':
+ missing = False
+ break
+
+ if missing:
+ with open(path, encoding='utf-8') as file:
+ contents = file.read()
+
+ # noinspection PyBroadException
+ try:
+ node = ast.parse(contents)
+
+ # files consisting of only assignments have no need for metaclass boilerplate
+ # the most likely case is that of a documentation only python file
+ if all(isinstance(statement, ast.Assign) for statement in node.body):
+ missing = False
+ except Exception: # pylint: disable=broad-except
+ pass # the compile sanity test will report this error
+
+ if missing:
+ print('%s: missing: __metaclass__ = type' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.json
new file mode 100644
index 0000000..ccee80a
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.json
@@ -0,0 +1,10 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "prefixes": [
+ "lib/ansible/",
+ "plugins/"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.py
new file mode 100644
index 0000000..8c1c027
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-assert.py
@@ -0,0 +1,24 @@
+"""Disallow use of assert."""
+from __future__ import annotations
+
+import re
+import sys
+
+ASSERT_RE = re.compile(r'^\s*assert[^a-z0-9_:]')
+
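+# Illustrative matches and non-matches for the pattern above:
+#
+#   assert x == 1     # matched: "assert" followed by a space
+#   assert(x)         # matched: "assert" followed by "("
+#   assertion = 1     # not matched: "i" continues an ordinary identifier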
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as file:
+ for i, line in enumerate(file.readlines()):
+ matches = ASSERT_RE.findall(line)
+
+ if matches:
+ lineno = i + 1
+ colno = line.index('assert') + 1
+ print('%s:%d:%d: raise AssertionError instead of: %s' % (path, lineno, colno, matches[0][colno - 1:]))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.json
new file mode 100644
index 0000000..88858ae
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.py
new file mode 100644
index 0000000..74e38d7
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-basestring.py
@@ -0,0 +1,21 @@
+"""Disallow use of basestring isinstance checks."""
+from __future__ import annotations
+
+import re
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(isinstance.*basestring)', text)
+
+ if match:
+ print('%s:%d:%d: do not use `isinstance(s, basestring)`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.json
new file mode 100644
index 0000000..88858ae
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.py
new file mode 100644
index 0000000..b4e4002
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iteritems.py
@@ -0,0 +1,21 @@
+"""Disallow use of the dict.iteritems function."""
+from __future__ import annotations
+
+import re
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(?<! six)\.(iteritems)', text)
+
+ if match:
+ print('%s:%d:%d: use `dict.items` or `ansible.module_utils.six.iteritems` instead of `dict.iteritems`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
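
A minimal sketch of the negative lookbehind above on hypothetical lines: a call
through the six helper is preceded by " six" and is tolerated, while a direct
dict call is flagged:

    import re

    pattern = re.compile(r'(?<! six)\.(iteritems)')

    print(bool(pattern.search('for k, v in data.iteritems():')))     # True: flagged
    print(bool(pattern.search('for k, v in six.iteritems(data):')))  # False: tolerated
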
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.json
new file mode 100644
index 0000000..88858ae
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.py
new file mode 100644
index 0000000..00c8703
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-iterkeys.py
@@ -0,0 +1,21 @@
+"""Disallow use of the dict.iterkeys function."""
+from __future__ import annotations
+
+import re
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'\.(iterkeys)', text)
+
+ if match:
+ print('%s:%d:%d: use `dict.keys` or `for key in dict:` instead of `dict.iterkeys`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.json
new file mode 100644
index 0000000..88858ae
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.py
new file mode 100644
index 0000000..2e8036a
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-dict-itervalues.py
@@ -0,0 +1,21 @@
+"""Disallow use of the dict.itervalues function."""
+from __future__ import annotations
+
+import re
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(?<! six)\.(itervalues)', text)
+
+ if match:
+ print('%s:%d:%d: use `dict.values` or `ansible.module_utils.six.itervalues` instead of `dict.itervalues`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.json
new file mode 100644
index 0000000..88858ae
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.py
new file mode 100644
index 0000000..0abb23d
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-get-exception.py
@@ -0,0 +1,28 @@
+"""Disallow use of the get_exception function."""
+from __future__ import annotations
+
+import re
+import sys
+
+
+def main():
+ """Main entry point."""
+ basic_allow_once = True
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'([^a-zA-Z0-9_]get_exception[^a-zA-Z0-9_])', text)
+
+ if match:
+ if path == 'lib/ansible/module_utils/basic.py' and basic_allow_once:
+ # basic.py is allowed to import get_exception for backwards compatibility but should not call it anywhere
+ basic_allow_once = False
+ continue
+
+ print('%s:%d:%d: do not use `get_exception`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.json
new file mode 100644
index 0000000..6f13c86
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.json
@@ -0,0 +1,5 @@
+{
+ "include_directories": true,
+ "include_symlinks": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.py
new file mode 100644
index 0000000..10bf4aa
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-illegal-filenames.py
@@ -0,0 +1,83 @@
+"""
+Check for illegal filenames on various operating systems.
+The main rules are derived from restrictions on Windows:
+https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#naming-conventions
+"""
+from __future__ import annotations
+
+import os
+import struct
+import sys
+
+from ansible.module_utils.basic import to_bytes
+
+ILLEGAL_CHARS = [
+ b'<',
+ b'>',
+ b':',
+ b'"',
+ b'/',
+ b'\\',
+ b'|',
+ b'?',
+ b'*'
+] + [struct.pack("b", i) for i in range(32)]
+
+ILLEGAL_NAMES = [
+ "CON",
+ "PRN",
+ "AUX",
+ "NUL",
+ "COM1",
+ "COM2",
+ "COM3",
+ "COM4",
+ "COM5",
+ "COM6",
+ "COM7",
+ "COM8",
+ "COM9",
+ "LPT1",
+ "LPT2",
+ "LPT3",
+ "LPT4",
+ "LPT5",
+ "LPT6",
+ "LPT7",
+ "LPT8",
+ "LPT9",
+]
+
+ILLEGAL_END_CHARS = [
+ '.',
+ ' ',
+]
+
+
+def check_path(path, is_dir=False):
+ """Check the specified path for unwanted characters and names."""
+ type_name = 'directory' if is_dir else 'file'
+ file_name = os.path.basename(path.rstrip(os.path.sep))
+ name = os.path.splitext(file_name)[0]
+
+ if name.upper() in ILLEGAL_NAMES:
+ print("%s: illegal %s name %s" % (path, type_name, name.upper()))
+
+ if file_name[-1] in ILLEGAL_END_CHARS:
+ print("%s: illegal %s name end-char '%s'" % (path, type_name, file_name[-1]))
+
+ bfile = to_bytes(file_name, encoding='utf-8')
+ for char in ILLEGAL_CHARS:
+ if char in bfile:
+ bpath = to_bytes(path, encoding='utf-8')
+ print("%s: illegal char '%s' in %s name" % (bpath, char, type_name))
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ check_path(path, is_dir=path.endswith(os.path.sep))
+
+
+if __name__ == '__main__':
+ main()
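
A minimal sketch of the reserved-name rule above, assuming a small subset of
ILLEGAL_NAMES: the stem is compared case-insensitively, so a reserved name stays
illegal regardless of extension or case:

    import os

    ILLEGAL_NAMES = {'CON', 'PRN', 'AUX', 'NUL'}  # subset of the full list above

    def is_reserved(path):
        name = os.path.splitext(os.path.basename(path))[0]
        return name.upper() in ILLEGAL_NAMES

    print(is_reserved('roles/con.yml'))   # True: 'con' is reserved on Windows
    print(is_reserved('roles/conf.yml'))  # False
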
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.json
new file mode 100644
index 0000000..ccee80a
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.json
@@ -0,0 +1,10 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "prefixes": [
+ "lib/ansible/",
+ "plugins/"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.py
new file mode 100644
index 0000000..eb5987d
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-main-display.py
@@ -0,0 +1,21 @@
+"""Disallow importing display from __main__."""
+from __future__ import annotations
+
+import sys
+
+MAIN_DISPLAY_IMPORT = 'from __main__ import display'
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as file:
+ for i, line in enumerate(file.readlines()):
+ if MAIN_DISPLAY_IMPORT in line:
+ lineno = i + 1
+ colno = line.index(MAIN_DISPLAY_IMPORT) + 1
+ print('%s:%d:%d: Display is a singleton, just import and instantiate' % (path, lineno, colno))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.json
new file mode 100644
index 0000000..54d9fff
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.json
@@ -0,0 +1,5 @@
+{
+ "text": true,
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.py
new file mode 100644
index 0000000..461033d
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-smart-quotes.py
@@ -0,0 +1,28 @@
+"""Disallow use of Unicode quotes."""
+# -*- coding: utf-8 -*-
+from __future__ import annotations
+
+import re
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ try:
+ text = text.decode('utf-8')
+ except UnicodeDecodeError as ex:
+ print('%s:%d:%d: UnicodeDecodeError: %s' % (path, line + 1, ex.start + 1, ex))
+ continue
+
+ match = re.search('([‘’“”])', text)
+
+ if match:
+ print('%s:%d:%d: use ASCII quotes `\'` and `"` instead of Unicode quotes' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.json
new file mode 100644
index 0000000..88858ae
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.py
new file mode 100644
index 0000000..75c34f2
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/no-unicode-literals.py
@@ -0,0 +1,21 @@
+"""Disallow use of the unicode_literals future."""
+from __future__ import annotations
+
+import re
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(unicode_literals)', text)
+
+ if match:
+ print('%s:%d:%d: do not use `unicode_literals`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.json
new file mode 100644
index 0000000..88858ae
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.py
new file mode 100644
index 0000000..a6dd5aa
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/replace-urlopen.py
@@ -0,0 +1,21 @@
+"""Disallow use of the urlopen function."""
+from __future__ import annotations
+
+import re
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'^(?:[^#]*?)(urlopen)', text)
+
+ if match:
+ print('%s:%d:%d: use `ansible.module_utils.urls.open_url` instead of `urlopen`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.json
new file mode 100644
index 0000000..44003ec
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.json
@@ -0,0 +1,11 @@
+{
+ "prefixes": [
+ "lib/ansible/config/ansible_builtin_runtime.yml",
+ "meta/routing.yml",
+ "meta/runtime.yml"
+ ],
+ "extensions": [
+ ".yml"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.py
new file mode 100644
index 0000000..6cf2777
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/runtime-metadata.py
@@ -0,0 +1,277 @@
+"""Schema validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml"""
+from __future__ import annotations
+
+import datetime
+import os
+import re
+import sys
+
+from functools import partial
+
+import yaml
+
+from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA
+from voluptuous import Required, Schema, Invalid
+from voluptuous.humanize import humanize_error
+
+from ansible.module_utils.compat.version import StrictVersion, LooseVersion
+from ansible.module_utils.six import string_types
+from ansible.utils.version import SemanticVersion
+
+
+def isodate(value, check_deprecation_date=False, is_tombstone=False):
+ """Validate a datetime.date or ISO 8601 date string."""
+ # datetime.date objects come from YAML dates, these are ok
+ if isinstance(value, datetime.date):
+ removal_date = value
+ else:
+ # make sure we have a string
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date'
+ if not isinstance(value, string_types):
+ raise Invalid(msg)
+ # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
+ # we have to do things manually.
+ if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
+ raise Invalid(msg)
+ try:
+ removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date()
+ except ValueError:
+ raise Invalid(msg)
+ # Make sure date is correct
+ today = datetime.date.today()
+ if is_tombstone:
+ # For a tombstone, the removal date must be in the past
+ if today < removal_date:
+ raise Invalid(
+ 'The tombstone removal_date (%s) must not be after today (%s)' % (removal_date, today))
+ else:
+ # For a deprecation, the removal date must be in the future. Only test this if
+ # check_deprecation_date is truthy, to avoid checks suddenly starting to fail.
+ if check_deprecation_date and today > removal_date:
+ raise Invalid(
+ 'The deprecation removal_date (%s) must be after today (%s)' % (removal_date, today))
+ return value
+
+
+def removal_version(value, is_ansible, current_version=None, is_tombstone=False):
+ """Validate a removal version string."""
+ msg = (
+ 'Removal version must be a string' if is_ansible else
+ 'Removal version must be a semantic version (https://semver.org/)'
+ )
+ if not isinstance(value, string_types):
+ raise Invalid(msg)
+ try:
+ if is_ansible:
+ version = StrictVersion()
+ version.parse(value)
+ version = LooseVersion(value) # We're storing Ansible's version as a LooseVersion
+ else:
+ version = SemanticVersion()
+ version.parse(value)
+ if version.major != 0 and (version.minor != 0 or version.patch != 0):
+ raise Invalid('removal_version (%r) must be a major release, not a minor or patch release '
+ '(see specification at https://semver.org/)' % (value, ))
+ if current_version is not None:
+ if is_tombstone:
+ # For a tombstone, the removal version must not be in the future
+ if version > current_version:
+ raise Invalid('The tombstone removal_version (%r) must not be after the '
+ 'current version (%s)' % (value, current_version))
+ else:
+ # For a deprecation, the removal version must be in the future
+ if version <= current_version:
+ raise Invalid('The deprecation removal_version (%r) must be after the '
+ 'current version (%s)' % (value, current_version))
+ except ValueError:
+ raise Invalid(msg)
+ return value
+
+
+def any_value(value):
+ """Accepts anything."""
+ return value
+
+
+def get_ansible_version():
+ """Return current ansible-core version"""
+ from ansible.release import __version__
+
+ return LooseVersion('.'.join(__version__.split('.')[:3]))
+
+
+def get_collection_version():
+ """Return current collection version, or None if it is not available"""
+ import importlib.util
+
+ collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'tools', 'collection_detail.py')
+ collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path)
+ collection_detail = importlib.util.module_from_spec(collection_detail_spec)
+ sys.modules['collection_detail'] = collection_detail
+ collection_detail_spec.loader.exec_module(collection_detail)
+
+ # noinspection PyBroadException
+ try:
+ result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.')
+ return SemanticVersion(result['version'])
+ except Exception: # pylint: disable=broad-except
+ # We do not care why it fails; if we cannot get the version,
+ # just return None to indicate "we don't know".
+ return None
+
+
+def validate_metadata_file(path, is_ansible, check_deprecation_dates=False):
+ """Validate explicit runtime metadata file"""
+ try:
+ with open(path, 'r', encoding='utf-8') as f_path:
+ routing = yaml.safe_load(f_path)
+ except yaml.error.MarkedYAMLError as ex:
+ print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line +
+ 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
+ return
+ except Exception as ex: # pylint: disable=broad-except
+ print('%s:%d:%d: YAML load failed: %s' %
+ (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
+ return
+
+ if is_ansible:
+ current_version = get_ansible_version()
+ else:
+ current_version = get_collection_version()
+
+ # Updates to schema MUST also be reflected in the documentation
+ # ~https://docs.ansible.com/ansible-core/devel/dev_guide/developing_collections.html
+
+ # plugin_routing schema
+
+ avoid_additional_data = Schema(
+ Any(
+ {
+ Required('removal_version'): any_value,
+ 'warning_text': any_value,
+ },
+ {
+ Required('removal_date'): any_value,
+ 'warning_text': any_value,
+ }
+ ),
+ extra=PREVENT_EXTRA
+ )
+
+ deprecation_schema = All(
+ # The first schema validates the input, and the second makes sure no extra keys are specified
+ Schema(
+ {
+ 'removal_version': partial(removal_version, is_ansible=is_ansible,
+ current_version=current_version),
+ 'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates),
+ 'warning_text': Any(*string_types),
+ }
+ ),
+ avoid_additional_data
+ )
+
+ tombstoning_schema = All(
+ # The first schema validates the input, and the second makes sure no extra keys are specified
+ Schema(
+ {
+ 'removal_version': partial(removal_version, is_ansible=is_ansible,
+ current_version=current_version, is_tombstone=True),
+ 'removal_date': partial(isodate, is_tombstone=True),
+ 'warning_text': Any(*string_types),
+ }
+ ),
+ avoid_additional_data
+ )
+
+ plugin_routing_schema = Any(
+ Schema({
+ ('deprecation'): Any(deprecation_schema),
+ ('tombstone'): Any(tombstoning_schema),
+ ('redirect'): Any(*string_types),
+ }, extra=PREVENT_EXTRA),
+ )
+
+ list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema}
+ for str_type in string_types]
+
+ plugin_schema = Schema({
+ ('action'): Any(None, *list_dict_plugin_routing_schema),
+ ('become'): Any(None, *list_dict_plugin_routing_schema),
+ ('cache'): Any(None, *list_dict_plugin_routing_schema),
+ ('callback'): Any(None, *list_dict_plugin_routing_schema),
+ ('cliconf'): Any(None, *list_dict_plugin_routing_schema),
+ ('connection'): Any(None, *list_dict_plugin_routing_schema),
+ ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema),
+ ('filter'): Any(None, *list_dict_plugin_routing_schema),
+ ('httpapi'): Any(None, *list_dict_plugin_routing_schema),
+ ('inventory'): Any(None, *list_dict_plugin_routing_schema),
+ ('lookup'): Any(None, *list_dict_plugin_routing_schema),
+ ('module_utils'): Any(None, *list_dict_plugin_routing_schema),
+ ('modules'): Any(None, *list_dict_plugin_routing_schema),
+ ('netconf'): Any(None, *list_dict_plugin_routing_schema),
+ ('shell'): Any(None, *list_dict_plugin_routing_schema),
+ ('strategy'): Any(None, *list_dict_plugin_routing_schema),
+ ('terminal'): Any(None, *list_dict_plugin_routing_schema),
+ ('test'): Any(None, *list_dict_plugin_routing_schema),
+ ('vars'): Any(None, *list_dict_plugin_routing_schema),
+ }, extra=PREVENT_EXTRA)
+
+ # import_redirection schema
+
+ import_redirection_schema = Any(
+ Schema({
+ ('redirect'): Any(*string_types),
+ # import_redirect doesn't currently support deprecation
+ }, extra=PREVENT_EXTRA)
+ )
+
+ list_dict_import_redirection_schema = [{str_type: import_redirection_schema}
+ for str_type in string_types]
+
+ # top level schema
+
+ schema = Schema({
+ # All of these are optional
+ ('plugin_routing'): Any(plugin_schema),
+ ('import_redirection'): Any(None, *list_dict_import_redirection_schema),
+ # requires_ansible: In the future we should validate this with SpecifierSet
+ ('requires_ansible'): Any(*string_types),
+ ('action_groups'): dict,
+ }, extra=PREVENT_EXTRA)
+
+ # Ensure schema is valid
+
+ try:
+ schema(routing)
+ except MultipleInvalid as ex:
+ for error in ex.errors:
+ # No way to get line/column numbers
+ print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error)))
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ collection_legacy_file = 'meta/routing.yml'
+ collection_runtime_file = 'meta/runtime.yml'
+
+ # This is currently disabled, because if it is enabled, this test can start failing
+ # at a random date. For this to be properly activated, we (a) need to be able to return
+ # codes for this test, and (b) make this error optional.
+ check_deprecation_dates = False
+
+ for path in paths:
+ if path == collection_legacy_file:
+ print('%s:%d:%d: %s' % (path, 0, 0, ("Should be called '%s'" % collection_runtime_file)))
+ continue
+
+ validate_metadata_file(
+ path,
+ is_ansible=path not in (collection_legacy_file, collection_runtime_file),
+ check_deprecation_dates=check_deprecation_dates)
+
+
+if __name__ == '__main__':
+ main()
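
For reference, a hypothetical collection meta/runtime.yml that the schema above
accepts; the module name, version, and warning text are invented for
illustration:

    import yaml

    runtime_yml = """
    requires_ansible: '>=2.14'
    plugin_routing:
      modules:
        old_module:
          deprecation:
            removal_version: 3.0.0
            warning_text: use new_module instead
    """

    routing = yaml.safe_load(runtime_yml)
    # For a collection, removal_version must parse as a semantic version and be
    # a major release (x.0.0), per removal_version() above.
    print(routing['plugin_routing']['modules']['old_module']['deprecation'])
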
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.json
new file mode 100644
index 0000000..5648429
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.json
@@ -0,0 +1,4 @@
+{
+ "text": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.py
new file mode 100644
index 0000000..b0b1319
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/shebang.py
@@ -0,0 +1,124 @@
+"""Check shebangs, execute bits and byte order marks."""
+from __future__ import annotations
+
+import os
+import re
+import stat
+import sys
+
+
+def main():
+ """Main entry point."""
+ standard_shebangs = set([
+ b'#!/bin/bash -eu',
+ b'#!/bin/bash -eux',
+ b'#!/bin/sh',
+ b'#!/usr/bin/env bash',
+ b'#!/usr/bin/env fish',
+ b'#!/usr/bin/env pwsh',
+ b'#!/usr/bin/env python',
+ b'#!/usr/bin/make -f',
+ ])
+
+ integration_shebangs = set([
+ b'#!/bin/sh',
+ b'#!/usr/bin/env bash',
+ b'#!/usr/bin/env python',
+ ])
+
+ module_shebangs = {
+ '': b'#!/usr/bin/python',
+ '.py': b'#!/usr/bin/python',
+ '.ps1': b'#!powershell',
+ }
+
+ # see https://unicode.org/faq/utf_bom.html#bom1
+ byte_order_marks = (
+ (b'\x00\x00\xFE\xFF', 'UTF-32 (BE)'),
+ (b'\xFF\xFE\x00\x00', 'UTF-32 (LE)'),
+ (b'\xFE\xFF', 'UTF-16 (BE)'),
+ (b'\xFF\xFE', 'UTF-16 (LE)'),
+ (b'\xEF\xBB\xBF', 'UTF-8'),
+ )
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ shebang = path_fd.readline().strip()
+ mode = os.stat(path).st_mode
+ executable = (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & mode
+
+ if not shebang or not shebang.startswith(b'#!'):
+ if executable:
+ print('%s:%d:%d: file without shebang should not be executable' % (path, 0, 0))
+
+ for mark, name in byte_order_marks:
+ if shebang.startswith(mark):
+ print('%s:%d:%d: file starts with a %s byte order mark' % (path, 0, 0, name))
+ break
+
+ continue
+
+ is_module = False
+ is_integration = False
+
+ dirname = os.path.dirname(path)
+
+ if path.startswith('lib/ansible/modules/'):
+ is_module = True
+ elif re.search('^test/support/[^/]+/plugins/modules/', path):
+ is_module = True
+ elif re.search('^test/support/[^/]+/collections/ansible_collections/[^/]+/[^/]+/plugins/modules/', path):
+ is_module = True
+ elif path == 'test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py':
+ pass # ansible-test entry point must be executable and have a shebang
+ elif re.search(r'^lib/ansible/cli/[^/]+\.py', path):
+ pass # cli entry points must be executable and have a shebang
+ elif path.startswith('examples/'):
+ continue # examples trigger some false positives due to location
+ elif path.startswith('lib/') or path.startswith('test/lib/'):
+ if executable:
+ print('%s:%d:%d: should not be executable' % (path, 0, 0))
+
+ if shebang:
+ print('%s:%d:%d: should not have a shebang' % (path, 0, 0))
+
+ continue
+ elif path.startswith('test/integration/targets/') or path.startswith('tests/integration/targets/'):
+ is_integration = True
+
+ if dirname.endswith('/library') or '/plugins/modules' in dirname or dirname in (
+ # non-standard module library directories
+ 'test/integration/targets/module_precedence/lib_no_extension',
+ 'test/integration/targets/module_precedence/lib_with_extension',
+ ):
+ is_module = True
+ elif path.startswith('plugins/modules/'):
+ is_module = True
+
+ if is_module:
+ if executable:
+ print('%s:%d:%d: module should not be executable' % (path, 0, 0))
+
+ ext = os.path.splitext(path)[1]
+ expected_shebang = module_shebangs.get(ext)
+ expected_ext = ' or '.join(['"%s"' % k for k in module_shebangs])
+
+ if expected_shebang:
+ if shebang == expected_shebang:
+ continue
+
+ print('%s:%d:%d: expected module shebang "%s" but found: %s' % (path, 1, 1, expected_shebang, shebang))
+ else:
+ print('%s:%d:%d: expected module extension %s but found: %s' % (path, 0, 0, expected_ext, ext))
+ else:
+ if is_integration:
+ allowed = integration_shebangs
+ else:
+ allowed = standard_shebangs
+
+ if shebang not in allowed:
+ print('%s:%d:%d: unexpected non-module shebang: %s' % (path, 1, 1, shebang))
+
+
+if __name__ == '__main__':
+ main()
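
A small sketch of the module shebang lookup above: the expected shebang is
chosen purely by file extension, so the two hypothetical module paths below
resolve to the Python and PowerShell shebangs respectively:

    import os

    module_shebangs = {
        '': b'#!/usr/bin/python',
        '.py': b'#!/usr/bin/python',
        '.ps1': b'#!powershell',
    }

    for path in ('plugins/modules/ping.py', 'plugins/modules/win_ping.ps1'):
        ext = os.path.splitext(path)[1]
        print(path, '->', module_shebangs.get(ext))
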
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.json
new file mode 100644
index 0000000..6f13c86
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.json
@@ -0,0 +1,5 @@
+{
+ "include_directories": true,
+ "include_symlinks": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.py
new file mode 100644
index 0000000..5cffc69
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/symlinks.py
@@ -0,0 +1,32 @@
+"""Check for unwanted symbolic links."""
+from __future__ import annotations
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ root_dir = os.getcwd() + os.path.sep
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ if not os.path.islink(path.rstrip(os.path.sep)):
+ continue
+
+ if not os.path.exists(path):
+ print('%s: broken symlinks are not allowed' % path)
+ continue
+
+ if path.endswith(os.path.sep):
+ print('%s: symlinks to directories are not allowed' % path)
+ continue
+
+ real_path = os.path.realpath(path)
+
+ if not real_path.startswith(root_dir):
+ print('%s: symlinks outside content tree are not allowed: %s' % (path, os.path.relpath(real_path, os.path.dirname(path))))
+ continue
+
+
+if __name__ == '__main__':
+ main()
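
A sketch of the containment test above with a hypothetical link path: a symlink
is acceptable only when its fully resolved target remains inside the current
working tree:

    import os

    root_dir = os.getcwd() + os.path.sep
    real_path = os.path.realpath('docs/link-to-readme')  # resolves symlinks, if any

    print(real_path.startswith(root_dir))  # False means the link escapes the tree
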
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.json
new file mode 100644
index 0000000..3610305
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.json
@@ -0,0 +1,10 @@
+{
+ "prefixes": [
+ "lib/ansible/modules/",
+ "plugins/modules/"
+ ],
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.py
new file mode 100644
index 0000000..0faeff3
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-argspec-type-path.py
@@ -0,0 +1,21 @@
+"""Disallow use of the expanduser function."""
+from __future__ import annotations
+
+import re
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(expanduser)', text)
+
+ if match:
+ print('%s:%d:%d: use argspec type="path" instead of type="str" to avoid use of `expanduser`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.json b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.json
new file mode 100644
index 0000000..776590b
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.json
@@ -0,0 +1,6 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.py b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.py
new file mode 100644
index 0000000..db42fec
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/code-smell/use-compat-six.py
@@ -0,0 +1,21 @@
+"""Disallow importing of the six module."""
+from __future__ import annotations
+
+import re
+import sys
+
+
+def main():
+ """Main entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r', encoding='utf-8') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'((^\s*import\s+six\b)|(^\s*from\s+six\b))', text)
+
+ if match:
+ print('%s:%d:%d: use `ansible.module_utils.six` instead of `six`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/integration-aliases/yaml_to_json.py b/test/lib/ansible_test/_util/controller/sanity/integration-aliases/yaml_to_json.py
new file mode 100644
index 0000000..af11dd8
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/integration-aliases/yaml_to_json.py
@@ -0,0 +1,14 @@
+"""Read YAML from stdin and write JSON to stdout."""
+from __future__ import annotations
+
+import json
+import sys
+
+from yaml import load
+
+try:
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeLoader
+
+json.dump(load(sys.stdin, Loader=SafeLoader), sys.stdout)
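
Hypothetical usage of this filter from another Python process: YAML goes in on
stdin and JSON comes out on stdout; the alias value is invented for
illustration:

    import subprocess

    result = subprocess.run(
        ['python', 'yaml_to_json.py'],
        input='aliases: [shippable/posix/group1]\n',
        capture_output=True, text=True, check=True,
    )
    print(result.stdout)  # {"aliases": ["shippable/posix/group1"]}
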
diff --git a/test/lib/ansible_test/_util/controller/sanity/mypy/ansible-core.ini b/test/lib/ansible_test/_util/controller/sanity/mypy/ansible-core.ini
new file mode 100644
index 0000000..4d93f35
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/mypy/ansible-core.ini
@@ -0,0 +1,119 @@
+# IMPORTANT
+# Set "ignore_missing_imports" per package below, rather than globally.
+# That will help identify missing type stubs that should be added to the sanity test environment.
+
+[mypy]
+# There are ~20 errors reported in ansible-core when strict optional checking is enabled.
+# Until the number of occurrences is reduced, it's better to disable strict checking.
+strict_optional = False
+# There are ~70 errors reported in ansible-core when checking attributes.
+# Until the number of occurrences is reduced, it's better to disable the check.
+disable_error_code = attr-defined
+
+[mypy-ansible.module_utils.six.moves.*]
+ignore_missing_imports = True
+
+[mypy-passlib.*]
+ignore_missing_imports = True
+
+[mypy-pexpect.*]
+ignore_missing_imports = True
+
+[mypy-pypsrp.*]
+ignore_missing_imports = True
+
+[mypy-winrm.*]
+ignore_missing_imports = True
+
+[mypy-kerberos.*]
+ignore_missing_imports = True
+
+[mypy-xmltodict.*]
+ignore_missing_imports = True
+
+[mypy-md5.*]
+ignore_missing_imports = True
+
+[mypy-scp.*]
+ignore_missing_imports = True
+
+[mypy-ncclient.*]
+ignore_missing_imports = True
+
+[mypy-lxml.*]
+ignore_missing_imports = True
+
+[mypy-yum.*]
+ignore_missing_imports = True
+
+[mypy-rpmUtils.*]
+ignore_missing_imports = True
+
+[mypy-rpm.*]
+ignore_missing_imports = True
+
+[mypy-psutil.*]
+ignore_missing_imports = True
+
+[mypy-dnf.*]
+ignore_missing_imports = True
+
+[mypy-apt.*]
+ignore_missing_imports = True
+
+[mypy-apt_pkg.*]
+ignore_missing_imports = True
+
+[mypy-gssapi.*]
+ignore_missing_imports = True
+
+[mypy-_ssl.*]
+ignore_missing_imports = True
+
+[mypy-urllib_gssapi.*]
+ignore_missing_imports = True
+
+[mypy-systemd.*]
+ignore_missing_imports = True
+
+[mypy-sha.*]
+ignore_missing_imports = True
+
+[mypy-distro.*]
+ignore_missing_imports = True
+
+[mypy-selectors2.*]
+ignore_missing_imports = True
+
+[mypy-resolvelib.*]
+ignore_missing_imports = True
+
+[mypy-urlparse.*]
+ignore_missing_imports = True
+
+[mypy-argcomplete.*]
+ignore_missing_imports = True
+
+[mypy-selinux.*]
+ignore_missing_imports = True
+
+[mypy-urllib2.*]
+ignore_missing_imports = True
+
+[mypy-httplib.*]
+ignore_missing_imports = True
+
+[mypy-compiler.*]
+ignore_missing_imports = True
+
+[mypy-aptsources.*]
+ignore_missing_imports = True
+
+[mypy-urllib3.*]
+ignore_missing_imports = True
+
+[mypy-requests.*]
+ignore_missing_imports = True
+
+[mypy-jinja2.nativetypes]
+ignore_missing_imports = True
diff --git a/test/lib/ansible_test/_util/controller/sanity/mypy/ansible-test.ini b/test/lib/ansible_test/_util/controller/sanity/mypy/ansible-test.ini
new file mode 100644
index 0000000..190e952
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/mypy/ansible-test.ini
@@ -0,0 +1,24 @@
+# IMPORTANT
+# Set "ignore_missing_imports" per package below, rather than globally.
+# That will help identify missing type stubs that should be added to the sanity test environment.
+
+[mypy]
+# There are ~350 errors reported in ansible-test when strict optional checking is enabled.
+# Until the number of occurrences is greatly reduced, it's better to disable strict checking.
+strict_optional = False
+# There are ~25 errors reported in ansible-test under the 'misc' code.
+# The majority of those errors are "Only concrete class can be given", which is due to a limitation of mypy.
+# See: https://github.com/python/mypy/issues/5374
+disable_error_code = misc
+
+[mypy-argcomplete]
+ignore_missing_imports = True
+
+[mypy-coverage]
+ignore_missing_imports = True
+
+[mypy-ansible_release]
+ignore_missing_imports = True
+
+[mypy-StringIO]
+ignore_missing_imports = True
diff --git a/test/lib/ansible_test/_util/controller/sanity/mypy/modules.ini b/test/lib/ansible_test/_util/controller/sanity/mypy/modules.ini
new file mode 100644
index 0000000..d6a608f
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/mypy/modules.ini
@@ -0,0 +1,98 @@
+# IMPORTANT
+# Set "ignore_missing_imports" per package below, rather than globally.
+# That will help identify missing type stubs that should be added to the sanity test environment.
+
+[mypy]
+
+[mypy-ansible.module_utils.six.moves.*]
+ignore_missing_imports = True
+
+[mypy-pexpect.*]
+ignore_missing_imports = True
+
+[mypy-md5.*]
+ignore_missing_imports = True
+
+[mypy-yum.*]
+ignore_missing_imports = True
+
+[mypy-rpmUtils.*]
+ignore_missing_imports = True
+
+[mypy-rpm.*]
+ignore_missing_imports = True
+
+[mypy-psutil.*]
+ignore_missing_imports = True
+
+[mypy-dnf.*]
+ignore_missing_imports = True
+
+[mypy-apt.*]
+ignore_missing_imports = True
+
+[mypy-apt_pkg.*]
+ignore_missing_imports = True
+
+[mypy-gssapi.*]
+ignore_missing_imports = True
+
+[mypy-_ssl.*]
+ignore_missing_imports = True
+
+[mypy-urllib_gssapi.*]
+ignore_missing_imports = True
+
+[mypy-systemd.*]
+ignore_missing_imports = True
+
+[mypy-sha.*]
+ignore_missing_imports = True
+
+[mypy-distro.*]
+ignore_missing_imports = True
+
+[mypy-selectors2.*]
+ignore_missing_imports = True
+
+[mypy-selinux.*]
+ignore_missing_imports = True
+
+[mypy-urllib2.*]
+ignore_missing_imports = True
+
+[mypy-httplib.*]
+ignore_missing_imports = True
+
+[mypy-compiler.*]
+ignore_missing_imports = True
+
+[mypy-aptsources.*]
+ignore_missing_imports = True
+
+[mypy-urllib3.*]
+ignore_missing_imports = True
+
+[mypy-requests.*]
+ignore_missing_imports = True
+
+[mypy-pkg_resources.*]
+ignore_missing_imports = True
+
+[mypy-urllib.*]
+ignore_missing_imports = True
+
+[mypy-email.*]
+ignore_missing_imports = True
+
+[mypy-selectors.*]
+ignore_missing_imports = True
+
+[mypy-importlib.*]
+ignore_missing_imports = True
+
+[mypy-collections.*]
+ignore_missing_imports = True
+
+[mypy-http.*]
+ignore_missing_imports = True
diff --git a/test/lib/ansible_test/_util/controller/sanity/pep8/current-ignore.txt b/test/lib/ansible_test/_util/controller/sanity/pep8/current-ignore.txt
new file mode 100644
index 0000000..659c7f5
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pep8/current-ignore.txt
@@ -0,0 +1,4 @@
+E402
+W503
+W504
+E741
diff --git a/test/lib/ansible_test/_util/controller/sanity/pslint/pslint.ps1 b/test/lib/ansible_test/_util/controller/sanity/pslint/pslint.ps1
new file mode 100644
index 0000000..0cf3c7f
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pslint/pslint.ps1
@@ -0,0 +1,37 @@
+#Requires -Version 6
+#Requires -Modules PSScriptAnalyzer, PSSA-PSCustomUseLiteralPath
+
+$ErrorActionPreference = "Stop"
+$WarningPreference = "Stop"
+
+$LiteralPathRule = Import-Module -Name PSSA-PSCustomUseLiteralPath -PassThru
+$LiteralPathRulePath = Join-Path -Path $LiteralPathRule.ModuleBase -ChildPath $LiteralPathRule.RootModule
+
+$PSSAParams = @{
+ CustomRulePath = @($LiteralPathRulePath)
+ IncludeDefaultRules = $true
+ Setting = (Join-Path -Path $PSScriptRoot -ChildPath "settings.psd1")
+}
+
+$Results = @(
+ ForEach ($Path in $Args) {
+ $Retries = 3
+
+ Do {
+ Try {
+ Invoke-ScriptAnalyzer -Path $Path @PSSAParams 3> $null
+ $Retries = 0
+ }
+ Catch {
+ If (--$Retries -le 0) {
+ Throw
+ }
+ }
+ }
+ Until ($Retries -le 0)
+ }
+)
+
+# Since pwsh 7.1, results that exceed the depth limit produce a warning, which fails the process.
+# Ignore warnings only for this step.
+ConvertTo-Json -InputObject $Results -Depth 1 -WarningAction SilentlyContinue
diff --git a/test/lib/ansible_test/_util/controller/sanity/pslint/settings.psd1 b/test/lib/ansible_test/_util/controller/sanity/pslint/settings.psd1
new file mode 100644
index 0000000..2ae13b4
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pslint/settings.psd1
@@ -0,0 +1,52 @@
+@{
+ Rules = @{
+ PSAvoidLongLines = @{
+ Enable = $true
+ MaximumLineLength = 160
+ }
+ PSPlaceOpenBrace = @{
+ Enable = $true
+ OnSameLine = $true
+ IgnoreOneLineBlock = $true
+ NewLineAfter = $true
+ }
+ PSPlaceCloseBrace = @{
+ Enable = $true
+ IgnoreOneLineBlock = $true
+ NewLineAfter = $true
+ NoEmptyLineBefore = $false
+ }
+ PSUseConsistentIndentation = @{
+ Enable = $true
+ IndentationSize = 4
+ PipelineIndentation = 'IncreaseIndentationForFirstPipeline'
+ Kind = 'space'
+ }
+ PSUseConsistentWhitespace = @{
+ Enable = $true
+ CheckInnerBrace = $true
+ CheckOpenBrace = $true
+ CheckOpenParen = $true
+ CheckOperator = $true
+ CheckPipe = $true
+ CheckPipeForRedundantWhitespace = $false
+ CheckSeparator = $true
+ CheckParameter = $false
+ IgnoreAssignmentOperatorInsideHashTable = $false
+ }
+ }
+ ExcludeRules = @(
+ 'PSUseOutputTypeCorrectly',
+ 'PSUseShouldProcessForStateChangingFunctions',
+ # We send strings as plaintext so will always come across the 3 issues
+ 'PSAvoidUsingPlainTextForPassword',
+ 'PSAvoidUsingConvertToSecureStringWithPlainText',
+ 'PSAvoidUsingUserNameAndPassWordParams',
+ # We send the module as a base64 encoded string and a BOM will cause
+ # issues here
+ 'PSUseBOMForUnicodeEncodedFile',
+ # Too many false positives, there are many cases where shared utils
+ # invoke user defined code but not all parameters are used.
+ 'PSReviewUnusedParameter'
+ )
+}
diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test-target.cfg b/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test-target.cfg
new file mode 100644
index 0000000..aa34772
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test-target.cfg
@@ -0,0 +1,57 @@
+[MESSAGES CONTROL]
+
+disable=
+ consider-using-f-string, # Python 2.x support still required
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ deprecated-method, # results vary by Python version
+ deprecated-module, # results vary by Python version
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ import-outside-toplevel, # common pattern in ansible related code
+ raise-missing-from, # Python 2.x does not support raise from
+ super-with-arguments, # Python 2.x does not support super without arguments
+ redundant-u-string-prefix, # Python 2.x support still required
+ too-few-public-methods,
+ too-many-arguments,
+ too-many-branches,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-return-statements,
+ too-many-statements,
+ useless-return, # complains about returning None when the return type is optional
+
+[BASIC]
+
+bad-names=
+ _,
+ bar,
+ baz,
+ foo,
+ tata,
+ toto,
+ tutu,
+
+good-names=
+ __metaclass__,
+ C,
+ ex,
+ i,
+ j,
+ k,
+ Run,
+
+class-attribute-rgx=[A-Za-z_][A-Za-z0-9_]{1,40}$
+attr-rgx=[a-z_][a-z0-9_]{1,40}$
+method-rgx=[a-z_][a-z0-9_]{1,40}$
+function-rgx=[a-z_][a-z0-9_]{1,40}$
+
+[IMPORTS]
+
+preferred-modules =
+ distutils.version:ansible.module_utils.compat.version,
+
+# These modules are used by ansible-test, but will not be present in the virtual environment running pylint.
+# Listing them here makes it possible to enable the import-error check.
+ignored-modules =
+ py,
diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg b/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg
new file mode 100644
index 0000000..1c03472
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pylint/config/ansible-test.cfg
@@ -0,0 +1,63 @@
+[MESSAGES CONTROL]
+
+disable=
+ consider-using-f-string, # many occurrences
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ deprecated-method, # results vary by Python version
+ deprecated-module, # results vary by Python version
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ import-outside-toplevel, # common pattern in ansible related code
+ raise-missing-from, # Python 2.x does not support raise from
+ too-few-public-methods,
+ too-many-public-methods,
+ too-many-arguments,
+ too-many-branches,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-return-statements,
+ too-many-statements,
+ unspecified-encoding, # always run with UTF-8 encoding enforced
+ useless-return, # complains about returning None when the return type is optional
+
+[BASIC]
+
+bad-names=
+ _,
+ bar,
+ baz,
+ foo,
+ tata,
+ toto,
+ tutu,
+
+good-names=
+ __metaclass__,
+ C,
+ ex,
+ i,
+ j,
+ k,
+ Run,
+
+class-attribute-rgx=[A-Za-z_][A-Za-z0-9_]{1,40}$
+attr-rgx=[a-z_][a-z0-9_]{1,40}$
+method-rgx=[a-z_][a-z0-9_]{1,40}$
+function-rgx=[a-z_][a-z0-9_]{1,40}$
+
+# Use the regex from earlier versions of pylint.
+# See: https://github.com/PyCQA/pylint/pull/7322
+typevar-rgx=^_{0,2}(?:[^\W\da-z_]+|(?:[^\W\da-z_]+[^\WA-Z_]+)+T?(?<!Type))(?:_co(?:ntra)?)?$
+
+[IMPORTS]
+
+preferred-modules =
+ distutils.version:ansible.module_utils.compat.version,
+
+# These modules are used by ansible-test, but will not be present in the virtual environment running pylint.
+# Listing them here makes it possible to enable the import-error check.
+ignored-modules =
+ cryptography,
+ coverage,
+ yamllint,
diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/config/code-smell.cfg b/test/lib/ansible_test/_util/controller/sanity/pylint/config/code-smell.cfg
new file mode 100644
index 0000000..e3aa8ee
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pylint/config/code-smell.cfg
@@ -0,0 +1,57 @@
+[MESSAGES CONTROL]
+
+disable=
+ consider-using-f-string, # many occurrences
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ deprecated-method, # results vary by Python version
+ deprecated-module, # results vary by Python version
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ import-outside-toplevel, # common pattern in ansible related code
+ raise-missing-from, # Python 2.x does not support raise from
+ too-few-public-methods,
+ too-many-arguments,
+ too-many-branches,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-return-statements,
+ too-many-statements,
+ unspecified-encoding, # always run with UTF-8 encoding enforced
+ useless-return, # complains about returning None when the return type is optional
+
+[BASIC]
+
+bad-names=
+ _,
+ bar,
+ baz,
+ foo,
+ tata,
+ toto,
+ tutu,
+
+good-names=
+ __metaclass__,
+ C,
+ ex,
+ i,
+ j,
+ k,
+ Run,
+
+class-attribute-rgx=[A-Za-z_][A-Za-z0-9_]{1,40}$
+attr-rgx=[a-z_][a-z0-9_]{1,40}$
+method-rgx=[a-z_][a-z0-9_]{1,40}$
+function-rgx=[a-z_][a-z0-9_]{1,40}$
+module-rgx=[a-z_][a-z0-9_-]{2,40}$
+
+[IMPORTS]
+
+preferred-modules =
+ distutils.version:ansible.module_utils.compat.version,
+
+# These modules are used by ansible-test, but will not be present in the virtual environment running pylint.
+# Listing them here makes it possible to enable the import-error check.
+ignored-modules =
+ voluptuous,
diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/config/collection.cfg b/test/lib/ansible_test/_util/controller/sanity/pylint/config/collection.cfg
new file mode 100644
index 0000000..38b8d2d
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pylint/config/collection.cfg
@@ -0,0 +1,147 @@
+[MESSAGES CONTROL]
+
+disable=
+ abstract-method,
+ access-member-before-definition,
+ arguments-differ,
+ assignment-from-no-return,
+ assignment-from-none,
+ attribute-defined-outside-init,
+ bad-indentation,
+ bad-mcs-classmethod-argument,
+ broad-except,
+ c-extension-no-member,
+ cell-var-from-loop,
+ chained-comparison,
+ comparison-with-callable,
+ consider-iterating-dictionary,
+ consider-merging-isinstance,
+ consider-using-dict-comprehension, # requires Python 2.7+, but we still require Python 2.6 support
+ consider-using-dict-items,
+ consider-using-enumerate,
+ consider-using-f-string, # Python 2.x support still required
+ consider-using-generator,
+ consider-using-get,
+ consider-using-in,
+ consider-using-set-comprehension, # requires Python 2.7+, but we still require Python 2.6 support
+ consider-using-ternary,
+ consider-using-with,
+ consider-using-max-builtin,
+ consider-using-min-builtin,
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ deprecated-method, # results vary by Python version
+ deprecated-module, # results vary by Python version
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ eval-used,
+ exec-used,
+ expression-not-assigned,
+ fixme,
+ function-redefined,
+ global-statement,
+ global-variable-undefined,
+ import-error, # inconsistent results which depend on the availability of imports
+ import-outside-toplevel, # common pattern in ansible related code
+ import-self,
+ inconsistent-return-statements,
+ invalid-envvar-default,
+ invalid-name,
+ invalid-sequence-index,
+ keyword-arg-before-vararg,
+ len-as-condition,
+ line-too-long,
+ literal-comparison,
+ locally-disabled,
+ method-hidden,
+ missing-docstring,
+ no-else-break,
+ no-else-continue,
+ no-else-raise,
+ no-else-return,
+ no-member,
+ no-name-in-module, # inconsistent results which depend on the availability of imports
+ no-value-for-parameter,
+ non-iterator-returned,
+ not-a-mapping,
+ not-an-iterable,
+ not-callable,
+ pointless-statement,
+ pointless-string-statement,
+ possibly-unused-variable,
+ protected-access,
+ raise-missing-from, # Python 2.x does not support raise from
+ redefined-argument-from-local,
+ redefined-builtin,
+ redefined-outer-name,
+ redundant-u-string-prefix, # Python 2.x support still required
+ reimported,
+ relative-beyond-top-level, # https://github.com/PyCQA/pylint/issues/2967
+ signature-differs,
+ simplifiable-if-expression,
+ simplifiable-if-statement,
+ subprocess-popen-preexec-fn,
+ super-init-not-called,
+ super-with-arguments, # Python 2.x does not support super without arguments
+ superfluous-parens,
+ too-few-public-methods,
+ too-many-ancestors, # inconsistent results between python 3.6 and 3.7+
+ too-many-arguments,
+ too-many-boolean-expressions,
+ too-many-branches,
+ too-many-function-args,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-public-methods,
+ too-many-return-statements,
+ too-many-statements,
+ trailing-comma-tuple,
+ try-except-raise,
+ unbalanced-tuple-unpacking,
+ undefined-loop-variable,
+ unexpected-keyword-arg,
+ ungrouped-imports,
+ unidiomatic-typecheck,
+ unnecessary-pass,
+ unnecessary-dunder-call,
+ unsubscriptable-object,
+ unsupported-assignment-operation,
+ unsupported-delete-operation,
+ unsupported-membership-test,
+ unused-argument,
+ unused-import,
+ unused-variable,
+ unspecified-encoding, # always run with UTF-8 encoding enforced
+ use-dict-literal, # many occurrences
+ use-list-literal, # many occurrences
+ use-implicit-booleaness-not-comparison, # many occurrences
+ useless-object-inheritance,
+ useless-return,
+ useless-super-delegation,
+ useless-option-value, # provide backwards compatibility with previous versions of ansible-test
+ wrong-import-order,
+ wrong-import-position,
+
+[BASIC]
+
+bad-names=
+ _,
+ bar,
+ baz,
+ foo,
+ tata,
+ toto,
+ tutu,
+
+good-names=
+ ex,
+ i,
+ j,
+ k,
+ Run,
+
+[TYPECHECK]
+
+ignored-modules=
+ _MovedItems,
diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/config/default.cfg b/test/lib/ansible_test/_util/controller/sanity/pylint/config/default.cfg
new file mode 100644
index 0000000..6a242b8
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pylint/config/default.cfg
@@ -0,0 +1,146 @@
+[MESSAGES CONTROL]
+
+disable=
+ import-outside-toplevel, # common pattern in ansible related code
+ abstract-method,
+ access-member-before-definition,
+ arguments-differ,
+ assignment-from-no-return,
+ assignment-from-none,
+ attribute-defined-outside-init,
+ bad-indentation,
+ bad-mcs-classmethod-argument,
+ broad-except,
+ c-extension-no-member,
+ cell-var-from-loop,
+ chained-comparison,
+ comparison-with-callable,
+ consider-iterating-dictionary,
+ consider-merging-isinstance,
+ consider-using-dict-items,
+ consider-using-enumerate,
+ consider-using-f-string, # Python 2.x support still required
+ consider-using-get,
+ consider-using-in,
+ consider-using-ternary,
+ consider-using-with,
+ consider-using-max-builtin,
+ consider-using-min-builtin,
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ deprecated-method, # results vary by Python version
+ deprecated-module, # results vary by Python version
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ eval-used,
+ exec-used,
+ expression-not-assigned,
+ fixme,
+ function-redefined,
+ global-statement,
+ global-variable-undefined,
+ import-error, # inconsistent results which depend on the availability of imports
+ import-self,
+ inconsistent-return-statements,
+ invalid-envvar-default,
+ invalid-name,
+ invalid-sequence-index,
+ keyword-arg-before-vararg,
+ len-as-condition,
+ line-too-long,
+ literal-comparison,
+ locally-disabled,
+ method-hidden,
+ missing-docstring,
+ no-else-break,
+ no-else-continue,
+ no-else-raise,
+ no-else-return,
+ no-member,
+ no-name-in-module, # inconsistent results which depend on the availability of imports
+ no-value-for-parameter,
+ non-iterator-returned,
+ not-a-mapping,
+ not-an-iterable,
+ not-callable,
+ pointless-statement,
+ pointless-string-statement,
+ possibly-unused-variable,
+ protected-access,
+ raise-missing-from, # Python 2.x does not support raise from
+ redefined-argument-from-local,
+ redefined-builtin,
+ redefined-outer-name,
+ redundant-u-string-prefix, # Python 2.x support still required
+ reimported,
+ signature-differs,
+ simplifiable-if-expression,
+ simplifiable-if-statement,
+ subprocess-popen-preexec-fn,
+ super-init-not-called,
+ super-with-arguments, # Python 2.x does not support super without arguments
+ superfluous-parens,
+ too-few-public-methods,
+ too-many-ancestors, # inconsistent results between python 3.6 and 3.7+
+ too-many-arguments,
+ too-many-boolean-expressions,
+ too-many-branches,
+ too-many-function-args,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-public-methods,
+ too-many-return-statements,
+ too-many-statements,
+ trailing-comma-tuple,
+ try-except-raise,
+ unbalanced-tuple-unpacking,
+ undefined-loop-variable,
+ unexpected-keyword-arg,
+ ungrouped-imports,
+ unidiomatic-typecheck,
+ unnecessary-pass,
+ unsubscriptable-object,
+ unsupported-assignment-operation,
+ unsupported-delete-operation,
+ unsupported-membership-test,
+ unused-argument,
+ unused-import,
+ unused-variable,
+ unspecified-encoding, # always run with UTF-8 encoding enforced
+ use-dict-literal, # many occurrences
+ use-list-literal, # many occurrences
+ use-implicit-booleaness-not-comparison, # many occurrences
+ useless-object-inheritance,
+ useless-return,
+ useless-super-delegation,
+ wrong-import-order,
+ wrong-import-position,
+
+[BASIC]
+
+bad-names=
+ _,
+ bar,
+ baz,
+ foo,
+ tata,
+ toto,
+ tutu,
+
+good-names=
+ ex,
+ i,
+ j,
+ k,
+ Run,
+
+[TYPECHECK]
+
+ignored-modules=
+ _MovedItems,
+
+[IMPORTS]
+
+preferred-modules =
+ distutils.version:ansible.module_utils.compat.version,
diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py
new file mode 100644
index 0000000..79b8bf1
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py
@@ -0,0 +1,263 @@
+"""Ansible specific plyint plugin for checking deprecations."""
+# (c) 2018, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# -*- coding: utf-8 -*-
+from __future__ import annotations
+
+import datetime
+import re
+import typing as t
+
+import astroid
+
+from pylint.interfaces import IAstroidChecker
+from pylint.checkers import BaseChecker
+from pylint.checkers.utils import check_messages
+
+from ansible.module_utils.compat.version import LooseVersion
+from ansible.module_utils.six import string_types
+from ansible.release import __version__ as ansible_version_raw
+from ansible.utils.version import SemanticVersion
+
+MSGS = {
+ 'E9501': ("Deprecated version (%r) found in call to Display.deprecated "
+ "or AnsibleModule.deprecate",
+ "ansible-deprecated-version",
+ "Used when a call to Display.deprecated specifies a version "
+ "less than or equal to the current version of Ansible",
+ {'minversion': (2, 6)}),
+ 'E9502': ("Display.deprecated call without a version or date",
+ "ansible-deprecated-no-version",
+ "Used when a call to Display.deprecated does not specify a "
+ "version or date",
+ {'minversion': (2, 6)}),
+ 'E9503': ("Invalid deprecated version (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "ansible-invalid-deprecated-version",
+ "Used when a call to Display.deprecated specifies an invalid "
+ "Ansible version number",
+ {'minversion': (2, 6)}),
+ 'E9504': ("Deprecated version (%r) found in call to Display.deprecated "
+ "or AnsibleModule.deprecate",
+ "collection-deprecated-version",
+ "Used when a call to Display.deprecated specifies a collection "
+ "version less than or equal to the current version of this "
+ "collection",
+ {'minversion': (2, 6)}),
+ 'E9505': ("Invalid deprecated version (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "collection-invalid-deprecated-version",
+ "Used when a call to Display.deprecated specifies an invalid "
+ "collection version number",
+ {'minversion': (2, 6)}),
+ 'E9506': ("No collection name found in call to Display.deprecated or "
+ "AnsibleModule.deprecate",
+ "ansible-deprecated-no-collection-name",
+ "The current collection name in format `namespace.name` must "
+ "be provided as collection_name when calling Display.deprecated "
+ "or AnsibleModule.deprecate (`ansible.builtin` for ansible-core)",
+ {'minversion': (2, 6)}),
+ 'E9507': ("Wrong collection name (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "wrong-collection-deprecated",
+ "The name of the current collection must be passed to the "
+ "Display.deprecated resp. AnsibleModule.deprecate calls "
+ "(`ansible.builtin` for ansible-core)",
+ {'minversion': (2, 6)}),
+ 'E9508': ("Expired date (%r) found in call to Display.deprecated "
+ "or AnsibleModule.deprecate",
+ "ansible-deprecated-date",
+ "Used when a call to Display.deprecated specifies a date "
+ "before today",
+ {'minversion': (2, 6)}),
+ 'E9509': ("Invalid deprecated date (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "ansible-invalid-deprecated-date",
+ "Used when a call to Display.deprecated specifies an invalid "
+ "date. It must be a string in format `YYYY-MM-DD` (ISO 8601)",
+ {'minversion': (2, 6)}),
+ 'E9510': ("Both version and date found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "ansible-deprecated-both-version-and-date",
+ "Only one of version and date must be specified",
+ {'minversion': (2, 6)}),
+ 'E9511': ("Removal version (%r) must be a major release, not a minor or "
+ "patch release (see the specification at https://semver.org/)",
+ "removal-version-must-be-major",
+ "Used when a call to Display.deprecated or "
+ "AnsibleModule.deprecate for a collection specifies a version "
+ "which is not of the form x.0.0",
+ {'minversion': (2, 6)}),
+}
+
+
+ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version_raw.split('.')[:3]))
+
+
+def _get_expr_name(node):
+ """Funciton to get either ``attrname`` or ``name`` from ``node.func.expr``
+
+ Created specifically for the case of ``display.deprecated`` or ``self._display.deprecated``
+ """
+ try:
+ return node.func.expr.attrname
+ except AttributeError:
+ # If this fails too, we'll let it raise, the caller should catch it
+ return node.func.expr.name
+
+
+def parse_isodate(value):
+ """Parse an ISO 8601 date string."""
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
+ if not isinstance(value, string_types):
+ raise ValueError(msg)
+    # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
+ # we have to do things manually.
+ if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
+ raise ValueError(msg)
+ try:
+ return datetime.datetime.strptime(value, '%Y-%m-%d').date()
+ except ValueError:
+ raise ValueError(msg)
+
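
A couple of illustrative calls for parse_isodate; the dates are invented for the example:

    parse_isodate('2023-01-31')   # -> datetime.date(2023, 1, 31)
    # parse_isodate('2023-1-31')  # ValueError: month and day must be zero-padded
    # parse_isodate(20230131)     # ValueError: not a string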
+
+class AnsibleDeprecatedChecker(BaseChecker):
+ """Checks for Display.deprecated calls to ensure that the ``version``
+ has not passed or met the time for removal
+ """
+
+ __implements__ = (IAstroidChecker,)
+ name = 'deprecated'
+ msgs = MSGS
+
+ options = (
+ ('collection-name', {
+ 'default': None,
+ 'type': 'string',
+ 'metavar': '<name>',
+ 'help': 'The collection\'s name used to check collection names in deprecations.',
+ }),
+ ('collection-version', {
+ 'default': None,
+ 'type': 'string',
+ 'metavar': '<version>',
+ 'help': 'The collection\'s version number used to check deprecations.',
+ }),
+ )
+
+ def _check_date(self, node, date):
+ if not isinstance(date, str):
+ self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
+ return
+
+ try:
+ date_parsed = parse_isodate(date)
+ except ValueError:
+ self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
+ return
+
+ if date_parsed < datetime.date.today():
+ self.add_message('ansible-deprecated-date', node=node, args=(date,))
+
+ def _check_version(self, node, version, collection_name):
+ if not isinstance(version, (str, float)):
+ if collection_name == 'ansible.builtin':
+ symbol = 'ansible-invalid-deprecated-version'
+ else:
+ symbol = 'collection-invalid-deprecated-version'
+ self.add_message(symbol, node=node, args=(version,))
+ return
+
+ version_no = str(version)
+
+ if collection_name == 'ansible.builtin':
+ # Ansible-base
+ try:
+ if not version_no:
+ raise ValueError('Version string should not be empty')
+ loose_version = LooseVersion(str(version_no))
+ if ANSIBLE_VERSION >= loose_version:
+ self.add_message('ansible-deprecated-version', node=node, args=(version,))
+ except ValueError:
+ self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,))
+ elif collection_name:
+ # Collections
+ try:
+ if not version_no:
+ raise ValueError('Version string should not be empty')
+ semantic_version = SemanticVersion(version_no)
+ if collection_name == self.collection_name and self.collection_version is not None:
+ if self.collection_version >= semantic_version:
+ self.add_message('collection-deprecated-version', node=node, args=(version,))
+ if semantic_version.major != 0 and (semantic_version.minor != 0 or semantic_version.patch != 0):
+ self.add_message('removal-version-must-be-major', node=node, args=(version,))
+ except ValueError:
+ self.add_message('collection-invalid-deprecated-version', node=node, args=(version,))
+
+ @property
+ def collection_name(self) -> t.Optional[str]:
+ """Return the collection name, or None if ansible-core is being tested."""
+ return self.config.collection_name
+
+ @property
+ def collection_version(self) -> t.Optional[SemanticVersion]:
+ """Return the collection version, or None if ansible-core is being tested."""
+ return SemanticVersion(self.config.collection_version) if self.config.collection_version is not None else None
+
+ @check_messages(*(MSGS.keys()))
+ def visit_call(self, node):
+ """Visit a call node."""
+ version = None
+ date = None
+ collection_name = None
+ try:
+ if (node.func.attrname == 'deprecated' and 'display' in _get_expr_name(node) or
+ node.func.attrname == 'deprecate' and _get_expr_name(node)):
+ if node.keywords:
+ for keyword in node.keywords:
+ if len(node.keywords) == 1 and keyword.arg is None:
+ # This is likely a **kwargs splat
+ return
+ if keyword.arg == 'version':
+ if isinstance(keyword.value.value, astroid.Name):
+ # This is likely a variable
+ return
+ version = keyword.value.value
+ if keyword.arg == 'date':
+ if isinstance(keyword.value.value, astroid.Name):
+ # This is likely a variable
+ return
+ date = keyword.value.value
+ if keyword.arg == 'collection_name':
+ if isinstance(keyword.value.value, astroid.Name):
+ # This is likely a variable
+ return
+ collection_name = keyword.value.value
+ if not version and not date:
+ try:
+ version = node.args[1].value
+ except IndexError:
+ self.add_message('ansible-deprecated-no-version', node=node)
+ return
+ if version and date:
+ self.add_message('ansible-deprecated-both-version-and-date', node=node)
+
+ if collection_name:
+ this_collection = collection_name == (self.collection_name or 'ansible.builtin')
+ if not this_collection:
+ self.add_message('wrong-collection-deprecated', node=node, args=(collection_name,))
+ elif self.collection_name is not None:
+ self.add_message('ansible-deprecated-no-collection-name', node=node)
+
+ if date:
+ self._check_date(node, date)
+ elif version:
+ self._check_version(node, version, collection_name)
+ except AttributeError:
+ # Not the type of node we are interested in
+ pass
+
+
+def register(linter):
+ """required method to auto register this checker """
+ linter.register_checker(AnsibleDeprecatedChecker(linter))
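
For context, a rough sketch of the call shapes this checker inspects; the messages and values below are illustrative, not taken from the patch:

    from ansible.utils.display import Display

    display = Display()

    # ansible-deprecated-version (E9501): 2.10 is already <= the current core version.
    display.deprecated('use ansible.builtin.new_thing instead', version='2.10',
                       collection_name='ansible.builtin')

    # ansible-deprecated-no-version (E9502): neither version= nor date= given.
    display.deprecated('use ansible.builtin.new_thing instead')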
diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py
new file mode 100644
index 0000000..934a9ae
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/string_format.py
@@ -0,0 +1,85 @@
+"""Ansible specific pylint plugin for checking format string usage."""
+# (c) 2018, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# -*- coding: utf-8 -*-
+from __future__ import annotations
+
+import astroid
+from pylint.interfaces import IAstroidChecker
+from pylint.checkers import BaseChecker
+from pylint.checkers import utils
+from pylint.checkers.utils import check_messages
+try:
+ from pylint.checkers.utils import parse_format_method_string
+except ImportError:
+ # noinspection PyUnresolvedReferences
+ from pylint.checkers.strings import parse_format_method_string
+
+MSGS = {
+ 'E9305': ("Format string contains automatic field numbering "
+ "specification",
+ "ansible-format-automatic-specification",
+ "Used when a PEP 3101 format string contains automatic "
+ "field numbering (e.g. '{}').",
+ {'minversion': (2, 6)}),
+ 'E9390': ("bytes object has no .format attribute",
+ "ansible-no-format-on-bytestring",
+ "Used when a bytestring was used as a PEP 3101 format string "
+ "as Python3 bytestrings do not have a .format attribute",
+ {'minversion': (3, 0)}),
+}
+
+
+class AnsibleStringFormatChecker(BaseChecker):
+ """Checks string formatting operations to ensure that the format string
+ is valid and the arguments match the format string.
+ """
+
+ __implements__ = (IAstroidChecker,)
+ name = 'string'
+ msgs = MSGS
+
+ @check_messages(*(MSGS.keys()))
+ def visit_call(self, node):
+ """Visit a call node."""
+ func = utils.safe_infer(node.func)
+ if (isinstance(func, astroid.BoundMethod)
+ and isinstance(func.bound, astroid.Instance)
+ and func.bound.name in ('str', 'unicode', 'bytes')):
+ if func.name == 'format':
+ self._check_new_format(node, func)
+
+ def _check_new_format(self, node, func):
+ """ Check the new string formatting """
+ if (isinstance(node.func, astroid.Attribute)
+ and not isinstance(node.func.expr, astroid.Const)):
+ return
+ try:
+ strnode = next(func.bound.infer())
+ except astroid.InferenceError:
+ return
+ if not isinstance(strnode, astroid.Const):
+ return
+
+ if isinstance(strnode.value, bytes):
+ self.add_message('ansible-no-format-on-bytestring', node=node)
+ return
+ if not isinstance(strnode.value, str):
+ return
+
+ if node.starargs or node.kwargs:
+ return
+ try:
+ num_args = parse_format_method_string(strnode.value)[1]
+ except utils.IncompleteFormatString:
+ return
+
+ if num_args:
+ self.add_message('ansible-format-automatic-specification',
+ node=node)
+ return
+
+
+def register(linter):
+ """required method to auto register this checker """
+ linter.register_checker(AnsibleStringFormatChecker(linter))
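
A short illustration of the two messages, assuming the plugin is registered; the strings are invented:

    # ansible-format-automatic-specification (E9305): automatic field numbering.
    msg = '{} of {}'.format(1, 2)

    # Accepted: explicit indexes (or named fields).
    msg = '{0} of {1}'.format(1, 2)

    # ansible-no-format-on-bytestring (E9390): bytes have no .format on Python 3.
    # msg = b'{0} of {1}'.format(1, 2)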
diff --git a/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py
new file mode 100644
index 0000000..1be42f5
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/pylint/plugins/unwanted.py
@@ -0,0 +1,223 @@
+"""A plugin for pylint to identify imports and functions which should not be used."""
+from __future__ import annotations
+
+import os
+import typing as t
+
+import astroid
+
+from pylint.checkers import BaseChecker
+from pylint.interfaces import IAstroidChecker
+
+ANSIBLE_TEST_MODULES_PATH = os.environ['ANSIBLE_TEST_MODULES_PATH']
+ANSIBLE_TEST_MODULE_UTILS_PATH = os.environ['ANSIBLE_TEST_MODULE_UTILS_PATH']
+
+
+class UnwantedEntry:
+ """Defines an unwanted import."""
+ def __init__(
+ self,
+ alternative, # type: str
+ modules_only=False, # type: bool
+ names=None, # type: t.Optional[t.Tuple[str, ...]]
+ ignore_paths=None, # type: t.Optional[t.Tuple[str, ...]]
+ ansible_test_only=False, # type: bool
+ ): # type: (...) -> None
+ self.alternative = alternative
+ self.modules_only = modules_only
+ self.names = set(names) if names else set()
+ self.ignore_paths = ignore_paths
+ self.ansible_test_only = ansible_test_only
+
+ def applies_to(self, path, name=None): # type: (str, t.Optional[str]) -> bool
+ """Return True if this entry applies to the given path, otherwise return False."""
+ if self.names:
+ if not name:
+ return False
+
+ if name not in self.names:
+ return False
+
+ if self.ignore_paths and any(path.endswith(ignore_path) for ignore_path in self.ignore_paths):
+ return False
+
+ if self.ansible_test_only and '/test/lib/ansible_test/_internal/' not in path:
+ return False
+
+ if self.modules_only:
+ return is_module_path(path)
+
+ return True
+
+
+def is_module_path(path): # type: (str) -> bool
+ """Return True if the given path is a module or module_utils path, otherwise return False."""
+ return path.startswith(ANSIBLE_TEST_MODULES_PATH) or path.startswith(ANSIBLE_TEST_MODULE_UTILS_PATH)
+
+
+class AnsibleUnwantedChecker(BaseChecker):
+ """Checker for unwanted imports and functions."""
+ __implements__ = (IAstroidChecker,)
+
+ name = 'unwanted'
+
+ BAD_IMPORT = 'ansible-bad-import'
+ BAD_IMPORT_FROM = 'ansible-bad-import-from'
+ BAD_FUNCTION = 'ansible-bad-function'
+ BAD_MODULE_IMPORT = 'ansible-bad-module-import'
+
+ msgs = dict(
+ E5101=('Import %s instead of %s',
+ BAD_IMPORT,
+ 'Identifies imports which should not be used.'),
+ E5102=('Import %s from %s instead of %s',
+ BAD_IMPORT_FROM,
+ 'Identifies imports which should not be used.'),
+ E5103=('Call %s instead of %s',
+ BAD_FUNCTION,
+ 'Identifies functions which should not be used.'),
+ E5104=('Import external package or ansible.module_utils not %s',
+ BAD_MODULE_IMPORT,
+ 'Identifies imports which should not be used.'),
+ )
+
+ unwanted_imports = dict(
+ # Additional imports that we may want to start checking:
+ # boto=UnwantedEntry('boto3', modules_only=True),
+ # requests=UnwantedEntry('ansible.module_utils.urls', modules_only=True),
+ # urllib=UnwantedEntry('ansible.module_utils.urls', modules_only=True),
+
+ # see https://docs.python.org/2/library/urllib2.html
+ urllib2=UnwantedEntry('ansible.module_utils.urls',
+ ignore_paths=(
+ '/lib/ansible/module_utils/urls.py',
+ )),
+
+ # see https://docs.python.org/3/library/collections.abc.html
+ collections=UnwantedEntry('ansible.module_utils.common._collections_compat',
+ ignore_paths=(
+ '/lib/ansible/module_utils/common/_collections_compat.py',
+ ),
+ names=(
+ 'MappingView',
+ 'ItemsView',
+ 'KeysView',
+ 'ValuesView',
+ 'Mapping', 'MutableMapping',
+ 'Sequence', 'MutableSequence',
+ 'Set', 'MutableSet',
+ 'Container',
+ 'Hashable',
+ 'Sized',
+ 'Callable',
+ 'Iterable',
+ 'Iterator',
+ )),
+ )
+
+ unwanted_functions = {
+ # see https://docs.python.org/3/library/tempfile.html#tempfile.mktemp
+ 'tempfile.mktemp': UnwantedEntry('tempfile.mkstemp'),
+
+ # os.chmod resolves as posix.chmod
+ 'posix.chmod': UnwantedEntry('verified_chmod',
+ ansible_test_only=True),
+
+ 'sys.exit': UnwantedEntry('exit_json or fail_json',
+ ignore_paths=(
+ '/lib/ansible/module_utils/basic.py',
+ '/lib/ansible/modules/async_wrapper.py',
+ ),
+ modules_only=True),
+
+ 'builtins.print': UnwantedEntry('module.log or module.debug',
+ ignore_paths=(
+ '/lib/ansible/module_utils/basic.py',
+ ),
+ modules_only=True),
+ }
+
+ def visit_import(self, node): # type: (astroid.node_classes.Import) -> None
+ """Visit an import node."""
+ for name in node.names:
+ self._check_import(node, name[0])
+
+ def visit_importfrom(self, node): # type: (astroid.node_classes.ImportFrom) -> None
+ """Visit an import from node."""
+ self._check_importfrom(node, node.modname, node.names)
+
+ def visit_attribute(self, node): # type: (astroid.node_classes.Attribute) -> None
+ """Visit an attribute node."""
+ last_child = node.last_child()
+
+ # this is faster than using type inference and will catch the most common cases
+ if not isinstance(last_child, astroid.node_classes.Name):
+ return
+
+ module = last_child.name
+
+ entry = self.unwanted_imports.get(module)
+
+ if entry and entry.names:
+ if entry.applies_to(self.linter.current_file, node.attrname):
+ self.add_message(self.BAD_IMPORT_FROM, args=(node.attrname, entry.alternative, module), node=node)
+
+ def visit_call(self, node): # type: (astroid.node_classes.Call) -> None
+ """Visit a call node."""
+ try:
+ for i in node.func.inferred():
+ func = None
+
+ if isinstance(i, astroid.scoped_nodes.FunctionDef) and isinstance(i.parent, astroid.scoped_nodes.Module):
+ func = '%s.%s' % (i.parent.name, i.name)
+
+ if not func:
+ continue
+
+ entry = self.unwanted_functions.get(func)
+
+ if entry and entry.applies_to(self.linter.current_file):
+ self.add_message(self.BAD_FUNCTION, args=(entry.alternative, func), node=node)
+ except astroid.exceptions.InferenceError:
+ pass
+
+ def _check_import(self, node, modname): # type: (astroid.node_classes.Import, str) -> None
+ """Check the imports on the specified import node."""
+ self._check_module_import(node, modname)
+
+ entry = self.unwanted_imports.get(modname)
+
+ if not entry:
+ return
+
+ if entry.applies_to(self.linter.current_file):
+ self.add_message(self.BAD_IMPORT, args=(entry.alternative, modname), node=node)
+
+ def _check_importfrom(self, node, modname, names): # type: (astroid.node_classes.ImportFrom, str, t.List[str]) -> None
+ """Check the imports on the specified import from node."""
+ self._check_module_import(node, modname)
+
+ entry = self.unwanted_imports.get(modname)
+
+ if not entry:
+ return
+
+ for name in names:
+ if entry.applies_to(self.linter.current_file, name[0]):
+ self.add_message(self.BAD_IMPORT_FROM, args=(name[0], entry.alternative, modname), node=node)
+
+ def _check_module_import(self, node, modname): # type: (t.Union[astroid.node_classes.Import, astroid.node_classes.ImportFrom], str) -> None
+ """Check the module import on the given import or import from node."""
+ if not is_module_path(self.linter.current_file):
+ return
+
+ if modname == 'ansible.module_utils' or modname.startswith('ansible.module_utils.'):
+ return
+
+ if modname == 'ansible' or modname.startswith('ansible.'):
+ self.add_message(self.BAD_MODULE_IMPORT, args=(modname,), node=node)
+
+
+def register(linter):
+ """required method to auto register this checker """
+ linter.register_checker(AnsibleUnwantedChecker(linter))
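
To make the entries above concrete, a hedged sketch of code this checker would flag inside module code, with the suggested replacements; the names are illustrative:

    # ansible-bad-import (E5101), per unwanted_imports:
    #   import urllib2                 # -> use ansible.module_utils.urls

    # ansible-bad-function (E5103), per unwanted_functions:
    #   tmp = tempfile.mktemp()        # -> use tempfile.mkstemp()

    import os
    import tempfile

    fd, tmp = tempfile.mkstemp()  # race-free replacement for mktemp()
    os.close(fd)
    os.remove(tmp)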
diff --git a/test/lib/ansible_test/_util/controller/sanity/shellcheck/exclude.txt b/test/lib/ansible_test/_util/controller/sanity/shellcheck/exclude.txt
new file mode 100644
index 0000000..29588dd
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/shellcheck/exclude.txt
@@ -0,0 +1,3 @@
+SC1090
+SC1091
+SC2164
diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate.py
new file mode 100644
index 0000000..ee7e832
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate.py
@@ -0,0 +1,6 @@
+from __future__ import annotations
+
+from validate_modules.main import main
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/__init__.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/__init__.py
new file mode 100644
index 0000000..1cfd6ac
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/__init__.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz <matt@sivel.net>
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import annotations
diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py
new file mode 100644
index 0000000..270c9f4
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/main.py
@@ -0,0 +1,2520 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz <matt@sivel.net>
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import annotations
+
+import abc
+import argparse
+import ast
+import datetime
+import json
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import traceback
+import warnings
+
+from collections import OrderedDict
+from collections.abc import Mapping
+from contextlib import contextmanager
+from fnmatch import fnmatch
+
+import yaml
+
+from voluptuous.humanize import humanize_error
+
+
+def setup_collection_loader():
+ """
+ Configure the collection loader if a collection is being tested.
+ This must be done before the plugin loader is imported.
+ """
+ if '--collection' not in sys.argv:
+ return
+
+ # noinspection PyProtectedMember
+ from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
+
+ collections_paths = os.environ.get('ANSIBLE_COLLECTIONS_PATH', '').split(os.pathsep)
+ collection_loader = _AnsibleCollectionFinder(collections_paths)
+ # noinspection PyProtectedMember
+ collection_loader._install() # pylint: disable=protected-access
+
+ warnings.filterwarnings(
+ "ignore",
+ "AnsibleCollectionFinder has already been configured")
+
+
+setup_collection_loader()
+
+from ansible import __version__ as ansible_version
+from ansible.executor.module_common import REPLACER_WINDOWS, NEW_STYLE_PYTHON_MODULE_RE
+from ansible.module_utils.common.parameters import DEFAULT_TYPE_VALIDATORS
+from ansible.module_utils.compat.version import StrictVersion, LooseVersion
+from ansible.module_utils.basic import to_bytes
+from ansible.module_utils.six import PY3, with_metaclass, string_types
+from ansible.plugins.loader import fragment_loader
+from ansible.plugins.list import IGNORE as REJECTLIST
+from ansible.utils.plugin_docs import add_collection_to_versions_and_dates, add_fragments, get_docstring
+from ansible.utils.version import SemanticVersion
+
+from .module_args import AnsibleModuleImportError, AnsibleModuleNotInitialized, get_argument_spec
+
+from .schema import ansible_module_kwargs_schema, doc_schema, return_schema
+
+from .utils import CaptureStd, NoArgsAnsibleModule, compare_unordered_lists, is_empty, parse_yaml, parse_isodate
+
+
+if PY3:
+    # Because there is no ast.TryExcept in the Python 3 ast module
+ TRY_EXCEPT = ast.Try
+    # REPLACER_WINDOWS from ansible.executor.module_common is a byte
+    # string, but we need text (unicode) for Python 3
+ REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8')
+else:
+ TRY_EXCEPT = ast.TryExcept
+
+REJECTLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea'))
+INDENT_REGEX = re.compile(r'([\t]*)')
+TYPE_REGEX = re.compile(r'.*(if|or)(\s+[^"\']*|\s+)(?<!_)(?<!str\()type\([^)].*')
+SYS_EXIT_REGEX = re.compile(r'[^#]*sys.exit\s*\(.*')
+NO_LOG_REGEX = re.compile(r'(?:pass(?!ive)|secret|token|key)', re.I)
+
+
+REJECTLIST_IMPORTS = {
+ 'requests': {
+ 'new_only': True,
+ 'error': {
+ 'code': 'use-module-utils-urls',
+ 'msg': ('requests import found, should use '
+ 'ansible.module_utils.urls instead')
+ }
+ },
+ r'boto(?:\.|$)': {
+ 'new_only': True,
+ 'error': {
+ 'code': 'use-boto3',
+ 'msg': 'boto import found, new modules should use boto3'
+ }
+ },
+}
+SUBPROCESS_REGEX = re.compile(r'subprocess\.Po.*')
+OS_CALL_REGEX = re.compile(r'os\.call.*')
+
+
+LOOSE_ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version.split('.')[:3]))
+
+
+PLUGINS_WITH_RETURN_VALUES = ('module', )
+PLUGINS_WITH_EXAMPLES = ('module', )
+PLUGINS_WITH_YAML_EXAMPLES = ('module', )
+
+
+def is_potential_secret_option(option_name):
+ if not NO_LOG_REGEX.search(option_name):
+ return False
+ # If this is a count, type, algorithm, timeout, filename, or name, it is probably not a secret
+ if option_name.endswith((
+ '_count', '_type', '_alg', '_algorithm', '_timeout', '_name', '_comment',
+ '_bits', '_id', '_identifier', '_period', '_file', '_filename',
+ )):
+ return False
+ # 'key' also matches 'publickey', which is generally not secret
+ if any(part in option_name for part in (
+ 'publickey', 'public_key', 'keyusage', 'key_usage', 'keyserver', 'key_server',
+ 'keysize', 'key_size', 'keyservice', 'key_service', 'pub_key', 'pubkey',
+ 'keyboard', 'secretary',
+ )):
+ return False
+ return True
+
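
A few illustrative inputs for the heuristic above; the option names are invented:

    assert is_potential_secret_option('api_token')            # matches 'token'
    assert not is_potential_secret_option('token_count')      # '_count' suffix is exempt
    assert not is_potential_secret_option('ssh_public_key')   # 'public_key' is exempt
    assert not is_potential_secret_option('keyboard')         # matches 'key' but exempt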
+
+def compare_dates(d1, d2):
+ try:
+ date1 = parse_isodate(d1, allow_date=True)
+ date2 = parse_isodate(d2, allow_date=True)
+ return date1 == date2
+ except ValueError:
+ # At least one of d1 and d2 cannot be parsed. Simply compare values.
+ return d1 == d2
+
+
+class ReporterEncoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, Exception):
+ return str(o)
+
+ return json.JSONEncoder.default(self, o)
+
+
+class Reporter:
+ def __init__(self):
+ self.files = OrderedDict()
+
+ def _ensure_default_entry(self, path):
+ try:
+ self.files[path]
+ except KeyError:
+ self.files[path] = {
+ 'errors': [],
+ 'warnings': [],
+ 'traces': [],
+ 'warning_traces': []
+ }
+
+ def _log(self, path, code, msg, level='error', line=0, column=0):
+ self._ensure_default_entry(path)
+ lvl_dct = self.files[path]['%ss' % level]
+ lvl_dct.append({
+ 'code': code,
+ 'msg': msg,
+ 'line': line,
+ 'column': column
+ })
+
+ def error(self, *args, **kwargs):
+ self._log(*args, level='error', **kwargs)
+
+ def warning(self, *args, **kwargs):
+ self._log(*args, level='warning', **kwargs)
+
+ def trace(self, path, tracebk):
+ self._ensure_default_entry(path)
+ self.files[path]['traces'].append(tracebk)
+
+ def warning_trace(self, path, tracebk):
+ self._ensure_default_entry(path)
+ self.files[path]['warning_traces'].append(tracebk)
+
+ @staticmethod
+ @contextmanager
+ def _output_handle(output):
+ if output != '-':
+ handle = open(output, 'w+')
+ else:
+ handle = sys.stdout
+
+ yield handle
+
+ handle.flush()
+ handle.close()
+
+ @staticmethod
+ def _filter_out_ok(reports):
+ temp_reports = OrderedDict()
+ for path, report in reports.items():
+ if report['errors'] or report['warnings']:
+ temp_reports[path] = report
+
+ return temp_reports
+
+ def plain(self, warnings=False, output='-'):
+ """Print out the test results in plain format
+
+ output is ignored here for now
+ """
+ ret = []
+
+ for path, report in Reporter._filter_out_ok(self.files).items():
+ traces = report['traces'][:]
+ if warnings and report['warnings']:
+ traces.extend(report['warning_traces'])
+
+ for trace in traces:
+ print('TRACE:')
+ print('\n '.join((' %s' % trace).splitlines()))
+ for error in report['errors']:
+ error['path'] = path
+ print('%(path)s:%(line)d:%(column)d: E%(code)s %(msg)s' % error)
+ ret.append(1)
+ if warnings:
+ for warning in report['warnings']:
+ warning['path'] = path
+ print('%(path)s:%(line)d:%(column)d: W%(code)s %(msg)s' % warning)
+
+ return 3 if ret else 0
+
+ def json(self, warnings=False, output='-'):
+ """Print out the test results in json format
+
+ warnings is not respected in this output
+ """
+ ret = [len(r['errors']) for r in self.files.values()]
+
+ with Reporter._output_handle(output) as handle:
+ print(json.dumps(Reporter._filter_out_ok(self.files), indent=4, cls=ReporterEncoder), file=handle)
+
+ return 3 if sum(ret) else 0
+
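
A minimal sketch of how a Reporter accumulates findings and renders them; the path, code, and message are invented:

    reporter = Reporter()
    reporter.error(path='lib/ansible/modules/foo.py', code='missing-examples',
                   msg='No EXAMPLES provided', line=1, column=1)
    rc = reporter.plain(warnings=True)  # prints each finding, returns 3 on errors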
+
+class Validator(with_metaclass(abc.ABCMeta, object)):
+ """Validator instances are intended to be run on a single object. if you
+ are scanning multiple objects for problems, you'll want to have a separate
+ Validator for each one."""
+
+ def __init__(self, reporter=None):
+ self.reporter = reporter
+
+ @property
+ @abc.abstractmethod
+ def object_name(self):
+ """Name of the object we validated"""
+ pass
+
+ @property
+ @abc.abstractmethod
+ def object_path(self):
+ """Path of the object we validated"""
+ pass
+
+ @abc.abstractmethod
+ def validate(self):
+ """Run this method to generate the test results"""
+ pass
+
+
+class ModuleValidator(Validator):
+ REJECTLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt')
+ REJECTLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml',
+ '.gitattributes', '.gitmodules', 'COPYING',
+ '__init__.py', 'VERSION', 'test-docs.sh'))
+ REJECTLIST = REJECTLIST_FILES.union(REJECTLIST['module'])
+
+ # win_dsc is a dynamic arg spec, the docs won't ever match
+ PS_ARG_VALIDATE_REJECTLIST = frozenset(('win_dsc.ps1', ))
+
+ ACCEPTLIST_FUTURE_IMPORTS = frozenset(('absolute_import', 'division', 'print_function'))
+
+ def __init__(self, path, analyze_arg_spec=False, collection=None, collection_version=None,
+ base_branch=None, git_cache=None, reporter=None, routing=None, plugin_type='module'):
+ super(ModuleValidator, self).__init__(reporter=reporter or Reporter())
+
+ self.path = path
+ self.basename = os.path.basename(self.path)
+ self.name = os.path.splitext(self.basename)[0]
+ self.plugin_type = plugin_type
+
+ self.analyze_arg_spec = analyze_arg_spec and plugin_type == 'module'
+
+ self._Version = LooseVersion
+ self._StrictVersion = StrictVersion
+
+ self.collection = collection
+ self.collection_name = 'ansible.builtin'
+ if self.collection:
+ self._Version = SemanticVersion
+ self._StrictVersion = SemanticVersion
+ collection_namespace_path, collection_name = os.path.split(self.collection)
+ self.collection_name = '%s.%s' % (os.path.basename(collection_namespace_path), collection_name)
+ self.routing = routing
+ self.collection_version = None
+ if collection_version is not None:
+ self.collection_version_str = collection_version
+ self.collection_version = SemanticVersion(collection_version)
+
+ self.base_branch = base_branch
+ self.git_cache = git_cache or GitCache()
+
+ self._python_module_override = False
+
+ with open(path) as f:
+ self.text = f.read()
+ self.length = len(self.text.splitlines())
+ try:
+ self.ast = ast.parse(self.text)
+ except Exception:
+ self.ast = None
+
+ if base_branch:
+ self.base_module = self._get_base_file()
+ else:
+ self.base_module = None
+
+ def _create_version(self, v, collection_name=None):
+ if not v:
+ raise ValueError('Empty string is not a valid version')
+ if collection_name == 'ansible.builtin':
+ return LooseVersion(v)
+ if collection_name is not None:
+ return SemanticVersion(v)
+ return self._Version(v)
+
+ def _create_strict_version(self, v, collection_name=None):
+ if not v:
+ raise ValueError('Empty string is not a valid version')
+ if collection_name == 'ansible.builtin':
+ return StrictVersion(v)
+ if collection_name is not None:
+ return SemanticVersion(v)
+ return self._StrictVersion(v)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if not self.base_module:
+ return
+
+ try:
+ os.remove(self.base_module)
+ except Exception:
+ pass
+
+ @property
+ def object_name(self):
+ return self.basename
+
+ @property
+ def object_path(self):
+ return self.path
+
+ def _get_collection_meta(self):
+ """Implement if we need this for version_added comparisons
+ """
+ pass
+
+ def _python_module(self):
+ if self.path.endswith('.py') or self._python_module_override:
+ return True
+ return False
+
+ def _powershell_module(self):
+ if self.path.endswith('.ps1'):
+ return True
+ return False
+
+ def _sidecar_doc(self):
+ if self.path.endswith('.yml') or self.path.endswith('.yaml'):
+ return True
+ return False
+
+ def _just_docs(self):
+ """Module can contain just docs and from __future__ boilerplate
+ """
+ try:
+ for child in self.ast.body:
+ if not isinstance(child, ast.Assign):
+ # allow string constant expressions (these are docstrings)
+ if isinstance(child, ast.Expr) and isinstance(child.value, ast.Constant) and isinstance(child.value.value, str):
+ continue
+
+ # allowed from __future__ imports
+ if isinstance(child, ast.ImportFrom) and child.module == '__future__':
+ for future_import in child.names:
+ if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS:
+ break
+ else:
+ continue
+ return False
+ return True
+ except AttributeError:
+ return False
+
+ def _get_base_branch_module_path(self):
+ """List all paths within lib/ansible/modules to try and match a moved module"""
+ return self.git_cache.base_module_paths.get(self.object_name)
+
+ def _has_alias(self):
+ """Return true if the module has any aliases."""
+ return self.object_name in self.git_cache.head_aliased_modules
+
+ def _get_base_file(self):
+ # In case of module moves, look for the original location
+ base_path = self._get_base_branch_module_path()
+ ext = os.path.splitext(base_path or self.path)[1]
+
+ command = ['git', 'show', '%s:%s' % (self.base_branch, base_path or self.path)]
+ p = subprocess.run(command, stdin=subprocess.DEVNULL, capture_output=True, check=False)
+
+ if int(p.returncode) != 0:
+ return None
+
+ t = tempfile.NamedTemporaryFile(delete=False, suffix=ext)
+ t.write(p.stdout)
+ t.close()
+
+ return t.name
+
+ def _is_new_module(self):
+ if self._has_alias():
+ return False
+
+ return not self.object_name.startswith('_') and bool(self.base_branch) and not bool(self.base_module)
+
+ def _check_interpreter(self, powershell=False):
+ if powershell:
+ if not self.text.startswith('#!powershell\n'):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-powershell-interpreter',
+ msg='Interpreter line is not "#!powershell"'
+ )
+ return
+
+ missing_python_interpreter = False
+
+ if not self.text.startswith('#!/usr/bin/python'):
+ if NEW_STYLE_PYTHON_MODULE_RE.search(to_bytes(self.text)):
+ missing_python_interpreter = self.text.startswith('#!') # shebang optional, but if present must match
+ else:
+ missing_python_interpreter = True # shebang required
+
+ if missing_python_interpreter:
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-python-interpreter',
+ msg='Interpreter line is not "#!/usr/bin/python"',
+ )
+
+ def _check_type_instead_of_isinstance(self, powershell=False):
+ if powershell:
+ return
+ for line_no, line in enumerate(self.text.splitlines()):
+ typekeyword = TYPE_REGEX.match(line)
+ if typekeyword:
+ # TODO: add column
+ self.reporter.error(
+ path=self.object_path,
+ code='unidiomatic-typecheck',
+ msg=('Type comparison using type() found. '
+ 'Use isinstance() instead'),
+ line=line_no + 1
+ )
+
+ def _check_for_sys_exit(self):
+ # Optimize out the happy path
+ if 'sys.exit' not in self.text:
+ return
+
+ for line_no, line in enumerate(self.text.splitlines()):
+ sys_exit_usage = SYS_EXIT_REGEX.match(line)
+ if sys_exit_usage:
+ # TODO: add column
+ self.reporter.error(
+ path=self.object_path,
+ code='use-fail-json-not-sys-exit',
+ msg='sys.exit() call found. Should be exit_json/fail_json',
+ line=line_no + 1
+ )
+
+ def _check_gpl3_header(self):
+ header = '\n'.join(self.text.split('\n')[:20])
+ if ('GNU General Public License' not in header or
+ ('version 3' not in header and 'v3.0' not in header)):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-gplv3-license',
+ msg='GPLv3 license header not found in the first 20 lines of the module'
+ )
+ elif self._is_new_module():
+ if len([line for line in header
+ if 'GNU General Public License' in line]) > 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='use-short-gplv3-license',
+ msg='Found old style GPLv3 license header: '
+ 'https://docs.ansible.com/ansible-core/devel/dev_guide/developing_modules_documenting.html#copyright'
+ )
+
+ def _check_for_subprocess(self):
+ for child in self.ast.body:
+ if isinstance(child, ast.Import):
+ if child.names[0].name == 'subprocess':
+ for line_no, line in enumerate(self.text.splitlines()):
+ sp_match = SUBPROCESS_REGEX.search(line)
+ if sp_match:
+ self.reporter.error(
+ path=self.object_path,
+ code='use-run-command-not-popen',
+ msg=('subprocess.Popen call found. Should be module.run_command'),
+ line=(line_no + 1),
+ column=(sp_match.span()[0] + 1)
+ )
+
+ def _check_for_os_call(self):
+ if 'os.call' in self.text:
+ for line_no, line in enumerate(self.text.splitlines()):
+ os_call_match = OS_CALL_REGEX.search(line)
+ if os_call_match:
+ self.reporter.error(
+ path=self.object_path,
+ code='use-run-command-not-os-call',
+ msg=('os.call() call found. Should be module.run_command'),
+ line=(line_no + 1),
+ column=(os_call_match.span()[0] + 1)
+ )
+
+ def _find_rejectlist_imports(self):
+ for child in self.ast.body:
+ names = []
+ if isinstance(child, ast.Import):
+ names.extend(child.names)
+ elif isinstance(child, TRY_EXCEPT):
+ bodies = child.body
+ for handler in child.handlers:
+ bodies.extend(handler.body)
+ for grandchild in bodies:
+ if isinstance(grandchild, ast.Import):
+ names.extend(grandchild.names)
+ for name in names:
+ # TODO: Add line/col
+ for rejectlist_import, options in REJECTLIST_IMPORTS.items():
+ if re.search(rejectlist_import, name.name):
+ new_only = options['new_only']
+ if self._is_new_module() and new_only:
+ self.reporter.error(
+ path=self.object_path,
+ **options['error']
+ )
+ elif not new_only:
+ self.reporter.error(
+ path=self.object_path,
+ **options['error']
+ )
+
+ def _find_module_utils(self):
+ linenos = []
+ found_basic = False
+ for child in self.ast.body:
+ if isinstance(child, (ast.Import, ast.ImportFrom)):
+ names = []
+ try:
+ names.append(child.module)
+ if child.module.endswith('.basic'):
+ found_basic = True
+ except AttributeError:
+ pass
+ names.extend([n.name for n in child.names])
+
+ if [n for n in names if n.startswith('ansible.module_utils')]:
+ linenos.append(child.lineno)
+
+ for name in child.names:
+ if ('module_utils' in getattr(child, 'module', '') and
+ isinstance(name, ast.alias) and
+ name.name == '*'):
+ msg = (
+ 'module-utils-specific-import',
+ ('module_utils imports should import specific '
+ 'components, not "*"')
+ )
+ if self._is_new_module():
+ self.reporter.error(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=child.lineno
+ )
+ else:
+ self.reporter.warning(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=child.lineno
+ )
+
+ if (isinstance(name, ast.alias) and
+ name.name == 'basic'):
+ found_basic = True
+
+ if not found_basic:
+ self.reporter.warning(
+ path=self.object_path,
+ code='missing-module-utils-basic-import',
+ msg='Did not find "ansible.module_utils.basic" import'
+ )
+
+ return linenos
+
+ def _get_first_callable(self):
+ linenos = []
+ for child in self.ast.body:
+ if isinstance(child, (ast.FunctionDef, ast.ClassDef)):
+ linenos.append(child.lineno)
+
+ return min(linenos) if linenos else None
+
+ def _find_has_import(self):
+ for child in self.ast.body:
+ found_try_except_import = False
+ found_has = False
+ if isinstance(child, TRY_EXCEPT):
+ bodies = child.body
+ for handler in child.handlers:
+ bodies.extend(handler.body)
+ for grandchild in bodies:
+ if isinstance(grandchild, ast.Import):
+ found_try_except_import = True
+ if isinstance(grandchild, ast.Assign):
+ for target in grandchild.targets:
+ if not isinstance(target, ast.Name):
+ continue
+ if target.id.lower().startswith('has_'):
+ found_has = True
+ if found_try_except_import and not found_has:
+ # TODO: Add line/col
+ self.reporter.warning(
+ path=self.object_path,
+ code='try-except-missing-has',
+ msg='Found Try/Except block without HAS_ assignment'
+ )
+
+ def _ensure_imports_below_docs(self, doc_info, first_callable):
+ min_doc_line = min(doc_info[key]['lineno'] for key in doc_info)
+ max_doc_line = max(doc_info[key]['end_lineno'] for key in doc_info)
+
+ import_lines = []
+
+ for child in self.ast.body:
+ if isinstance(child, (ast.Import, ast.ImportFrom)):
+ if isinstance(child, ast.ImportFrom) and child.module == '__future__':
+ # allowed from __future__ imports
+ for future_import in child.names:
+ if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS:
+ self.reporter.error(
+ path=self.object_path,
+ code='illegal-future-imports',
+ msg=('Only the following from __future__ imports are allowed: %s'
+ % ', '.join(self.ACCEPTLIST_FUTURE_IMPORTS)),
+ line=child.lineno
+ )
+ break
+                    else:  # for-else: if we didn't find a problem and break out of the loop, this is a legal import
+ continue
+ import_lines.append(child.lineno)
+ if child.lineno < min_doc_line:
+ self.reporter.error(
+ path=self.object_path,
+ code='import-before-documentation',
+ msg=('Import found before documentation variables. '
+ 'All imports must appear below '
+ 'DOCUMENTATION/EXAMPLES/RETURN.'),
+ line=child.lineno
+ )
+ break
+ elif isinstance(child, TRY_EXCEPT):
+ bodies = child.body
+ for handler in child.handlers:
+ bodies.extend(handler.body)
+ for grandchild in bodies:
+ if isinstance(grandchild, (ast.Import, ast.ImportFrom)):
+ import_lines.append(grandchild.lineno)
+ if grandchild.lineno < min_doc_line:
+ self.reporter.error(
+ path=self.object_path,
+ code='import-before-documentation',
+ msg=('Import found before documentation '
+ 'variables. All imports must appear below '
+ 'DOCUMENTATION/EXAMPLES/RETURN.'),
+ line=child.lineno
+ )
+ break
+
+ for import_line in import_lines:
+ if not (max_doc_line < import_line < first_callable):
+ msg = (
+ 'import-placement',
+ ('Imports should be directly below DOCUMENTATION/EXAMPLES/'
+ 'RETURN.')
+ )
+ if self._is_new_module():
+ self.reporter.error(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=import_line
+ )
+ else:
+ self.reporter.warning(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=import_line
+ )
+
+ def _validate_ps_replacers(self):
+ # loop all (for/else + error)
+ # get module list for each
+ # check "shape" of each module name
+
+ module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'
+ csharp_requires = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*(Ansible\..+)'
+ found_requires = False
+
+ for req_stmt in re.finditer(module_requires, self.text):
+ found_requires = True
+ # this will bomb on dictionary format - "don't do that"
+ module_list = [x.strip() for x in req_stmt.group(1).split(',')]
+ if len(module_list) > 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='multiple-utils-per-requires',
+ msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0)
+ )
+ continue
+
+ module_name = module_list[0]
+
+ if module_name.lower().endswith('.psm1'):
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-requires-extension',
+ msg='Module #Requires should not end in .psm1: "%s"' % module_name
+ )
+
+ for req_stmt in re.finditer(csharp_requires, self.text):
+ found_requires = True
+ # this will bomb on dictionary format - "don't do that"
+ module_list = [x.strip() for x in req_stmt.group(1).split(',')]
+ if len(module_list) > 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='multiple-csharp-utils-per-requires',
+ msg='Ansible C# util requirements do not support multiple utils per statement: "%s"' % req_stmt.group(0)
+ )
+ continue
+
+ module_name = module_list[0]
+
+ if module_name.lower().endswith('.cs'):
+ self.reporter.error(
+ path=self.object_path,
+ code='illegal-extension-cs',
+ msg='Module #AnsibleRequires -CSharpUtil should not end in .cs: "%s"' % module_name
+ )
+
+ # also accept the legacy #POWERSHELL_COMMON replacer signal
+ if not found_requires and REPLACER_WINDOWS not in self.text:
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-module-utils-import-csharp-requirements',
+ msg='No Ansible.ModuleUtils or C# Ansible util requirements/imports found'
+ )
+
+ def _find_ps_docs_file(self):
+ sidecar = self._find_sidecar_docs()
+ if sidecar:
+ return sidecar
+
+ py_path = self.path.replace('.ps1', '.py')
+ if not os.path.isfile(py_path):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-documentation',
+ msg='No DOCUMENTATION provided'
+ )
+ return py_path
+
+ def _find_sidecar_docs(self):
+ base_path = os.path.splitext(self.path)[0]
+ for ext in ('.yml', '.yaml'):
+ doc_path = f"{base_path}{ext}"
+ if os.path.isfile(doc_path):
+ return doc_path
+
+ def _get_py_docs(self):
+ docs = {
+ 'DOCUMENTATION': {
+ 'value': None,
+ 'lineno': 0,
+ 'end_lineno': 0,
+ },
+ 'EXAMPLES': {
+ 'value': None,
+ 'lineno': 0,
+ 'end_lineno': 0,
+ },
+ 'RETURN': {
+ 'value': None,
+ 'lineno': 0,
+ 'end_lineno': 0,
+ },
+ }
+ for child in self.ast.body:
+ if isinstance(child, ast.Assign):
+ for grandchild in child.targets:
+ if not isinstance(grandchild, ast.Name):
+ continue
+
+ if grandchild.id == 'DOCUMENTATION':
+ docs['DOCUMENTATION']['value'] = child.value.s
+ docs['DOCUMENTATION']['lineno'] = child.lineno
+ docs['DOCUMENTATION']['end_lineno'] = (
+ child.lineno + len(child.value.s.splitlines())
+ )
+ elif grandchild.id == 'EXAMPLES':
+ docs['EXAMPLES']['value'] = child.value.s
+ docs['EXAMPLES']['lineno'] = child.lineno
+ docs['EXAMPLES']['end_lineno'] = (
+ child.lineno + len(child.value.s.splitlines())
+ )
+ elif grandchild.id == 'RETURN':
+ docs['RETURN']['value'] = child.value.s
+ docs['RETURN']['lineno'] = child.lineno
+ docs['RETURN']['end_lineno'] = (
+ child.lineno + len(child.value.s.splitlines())
+ )
+
+ return docs
+
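
The extraction above expects the documentation to live in top-level string assignments of roughly this shape; the content is invented:

    DOCUMENTATION = '''
    module: foo
    short_description: An illustrative module
    '''

    EXAMPLES = '''
    - name: Use foo
      foo:
    '''

    RETURN = '''
    msg:
      description: Status message.
      returned: always
      type: str
    '''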
+ def _validate_docs_schema(self, doc, schema, name, error_code):
+ # TODO: Add line/col
+ errors = []
+ try:
+ schema(doc)
+ except Exception as e:
+ for error in e.errors:
+ error.data = doc
+ errors.extend(e.errors)
+
+ for error in errors:
+ path = [str(p) for p in error.path]
+
+ local_error_code = getattr(error, 'ansible_error_code', error_code)
+
+ if isinstance(error.data, dict):
+ error_message = humanize_error(error.data, error)
+ else:
+ error_message = error
+
+ if path:
+ combined_path = '%s.%s' % (name, '.'.join(path))
+ else:
+ combined_path = name
+
+ self.reporter.error(
+ path=self.object_path,
+ code=local_error_code,
+ msg='%s: %s' % (combined_path, error_message)
+ )
+
+ def _validate_docs(self):
+ doc = None
+        # We have three ways of marking deprecated/removed files. We have to check each one
+        # individually and then make sure they all agree
+ filename_deprecated_or_removed = False
+ deprecated = False
+ doc_deprecated = None # doc legally might not exist
+ routing_says_deprecated = False
+
+ if self.object_name.startswith('_') and not os.path.islink(self.object_path):
+ filename_deprecated_or_removed = True
+
+ # We are testing a collection
+ if self.routing:
+ routing_deprecation = self.routing.get('plugin_routing', {})
+ routing_deprecation = routing_deprecation.get('modules' if self.plugin_type == 'module' else self.plugin_type, {})
+ routing_deprecation = routing_deprecation.get(self.name, {}).get('deprecation', {})
+ if routing_deprecation:
+ # meta/runtime.yml says this is deprecated
+ routing_says_deprecated = True
+ deprecated = True
+
+ if self._python_module():
+ doc_info = self._get_py_docs()
+ else:
+ doc_info = None
+
+ sidecar_text = None
+ if self._sidecar_doc():
+ sidecar_text = self.text
+ elif sidecar_path := self._find_sidecar_docs():
+ with open(sidecar_path, mode='r', encoding='utf-8') as fd:
+ sidecar_text = fd.read()
+
+ if sidecar_text:
+ sidecar_doc, errors, traces = parse_yaml(sidecar_text, 0, self.name, 'DOCUMENTATION')
+ for error in errors:
+ self.reporter.error(
+ path=self.object_path,
+ code='documentation-syntax-error',
+ **error
+ )
+ for trace in traces:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=trace
+ )
+
+ doc = sidecar_doc.get('DOCUMENTATION', None)
+ examples_raw = sidecar_doc.get('EXAMPLES', None)
+ examples_lineno = 1
+ returns = sidecar_doc.get('RETURN', None)
+
+ elif doc_info:
+ if bool(doc_info['DOCUMENTATION']['value']):
+ doc, errors, traces = parse_yaml(
+ doc_info['DOCUMENTATION']['value'],
+ doc_info['DOCUMENTATION']['lineno'],
+ self.name, 'DOCUMENTATION'
+ )
+
+ for error in errors:
+ self.reporter.error(
+ path=self.object_path,
+ code='documentation-syntax-error',
+ **error
+ )
+ for trace in traces:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=trace
+ )
+
+ examples_raw = doc_info['EXAMPLES']['value']
+ examples_lineno = doc_info['EXAMPLES']['lineno']
+
+ returns = None
+ if bool(doc_info['RETURN']['value']):
+ returns, errors, traces = parse_yaml(doc_info['RETURN']['value'],
+ doc_info['RETURN']['lineno'],
+ self.name, 'RETURN')
+
+ for error in errors:
+ self.reporter.error(
+ path=self.object_path,
+ code='return-syntax-error',
+ **error
+ )
+ for trace in traces:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=trace
+ )
+
+ if doc:
+ add_collection_to_versions_and_dates(doc, self.collection_name,
+ is_module=self.plugin_type == 'module')
+
+ missing_fragment = False
+ with CaptureStd():
+ try:
+ get_docstring(self.path, fragment_loader=fragment_loader,
+ verbose=True,
+ collection_name=self.collection_name,
+ plugin_type=self.plugin_type)
+ except AssertionError:
+ fragment = doc['extends_documentation_fragment']
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-doc-fragment',
+ msg='DOCUMENTATION fragment missing: %s' % fragment
+ )
+ missing_fragment = True
+ except Exception as e:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=traceback.format_exc()
+ )
+ self.reporter.error(
+ path=self.object_path,
+ code='documentation-error',
+ msg='Unknown DOCUMENTATION error, see TRACE: %s' % e
+ )
+
+ if not missing_fragment:
+ add_fragments(doc, self.object_path, fragment_loader=fragment_loader,
+ is_module=self.plugin_type == 'module')
+
+ if 'options' in doc and doc['options'] is None:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-documentation-options',
+ msg='DOCUMENTATION.options must be a dictionary/hash when used',
+ )
+
+ if 'deprecated' in doc and doc.get('deprecated'):
+ doc_deprecated = True
+ doc_deprecation = doc['deprecated']
+ documentation_collection = doc_deprecation.get('removed_from_collection')
+ if documentation_collection != self.collection_name:
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-wrong-collection',
+ msg='"DOCUMENTATION.deprecation.removed_from_collection must be the current collection name: %r vs. %r' % (
+ documentation_collection, self.collection_name)
+ )
+ else:
+ doc_deprecated = False
+
+ if os.path.islink(self.object_path):
+ # This module has an alias, which we can tell as it's a symlink
+ # Rather than checking for `module: $filename` we need to check against the true filename
+ self._validate_docs_schema(
+ doc,
+ doc_schema(
+ os.readlink(self.object_path).split('.')[0],
+ for_collection=bool(self.collection),
+ deprecated_module=deprecated,
+ plugin_type=self.plugin_type,
+ ),
+ 'DOCUMENTATION',
+ 'invalid-documentation',
+ )
+ else:
+ # This is the normal case
+ self._validate_docs_schema(
+ doc,
+ doc_schema(
+ self.object_name.split('.')[0],
+ for_collection=bool(self.collection),
+ deprecated_module=deprecated,
+ plugin_type=self.plugin_type,
+ ),
+ 'DOCUMENTATION',
+ 'invalid-documentation',
+ )
+
+ if not self.collection:
+ existing_doc = self._check_for_new_args(doc)
+ self._check_version_added(doc, existing_doc)
+ else:
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-documentation',
+ msg='No DOCUMENTATION provided',
+ )
+
+        if not examples_raw and self.plugin_type in PLUGINS_WITH_EXAMPLES:
+            self.reporter.error(
+                path=self.object_path,
+                code='missing-examples',
+                msg='No EXAMPLES provided'
+            )
+
+ elif self.plugin_type in PLUGINS_WITH_YAML_EXAMPLES:
+ dummy, errors, traces = parse_yaml(examples_raw,
+ examples_lineno,
+ self.name, 'EXAMPLES',
+ load_all=True,
+ ansible_loader=True)
+ for error in errors:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-examples',
+ **error
+ )
+ for trace in traces:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=trace
+ )
+
+        if returns:
+            add_collection_to_versions_and_dates(
+                returns,
+                self.collection_name,
+                is_module=self.plugin_type == 'module',
+                return_docs=True)
+            self._validate_docs_schema(
+                returns,
+                return_schema(for_collection=bool(self.collection), plugin_type=self.plugin_type),
+                'RETURN', 'return-syntax-error')
+
+ elif self.plugin_type in PLUGINS_WITH_RETURN_VALUES:
+ if self._is_new_module():
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-return',
+ msg='No RETURN provided'
+ )
+ else:
+ self.reporter.warning(
+ path=self.object_path,
+ code='missing-return-legacy',
+ msg='No RETURN provided'
+ )
+
+ # Check for mismatched deprecation
+ if not self.collection:
+ mismatched_deprecation = True
+ if not (filename_deprecated_or_removed or deprecated or doc_deprecated):
+ mismatched_deprecation = False
+ else:
+ if (filename_deprecated_or_removed and doc_deprecated):
+ mismatched_deprecation = False
+ if (filename_deprecated_or_removed and not doc):
+ mismatched_deprecation = False
+
+ if mismatched_deprecation:
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='Module deprecation/removed must agree in documentation, by prepending filename with'
+ ' "_", and setting DOCUMENTATION.deprecated for deprecation or by removing all'
+ ' documentation for removed'
+ )
+ else:
+ # We are testing a collection
+ if self.object_name.startswith('_'):
+ self.reporter.error(
+ path=self.object_path,
+ code='collections-no-underscore-on-deprecation',
+ msg='Deprecated content in collections MUST NOT start with "_", update meta/runtime.yml instead',
+ )
+
+            if doc_deprecated != routing_says_deprecated:
+ # DOCUMENTATION.deprecated and meta/runtime.yml disagree
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.'
+ )
+ elif routing_says_deprecated:
+ # Both DOCUMENTATION.deprecated and meta/runtime.yml agree that the module is deprecated.
+ # Make sure they give the same version or date.
+ routing_date = routing_deprecation.get('removal_date')
+ routing_version = routing_deprecation.get('removal_version')
+ # The versions and dates in the module documentation are auto-tagged, so remove the tag
+ # to make comparison possible and to avoid confusing the user.
+ documentation_date = doc_deprecation.get('removed_at_date')
+ documentation_version = doc_deprecation.get('removed_in')
+ if not compare_dates(routing_date, documentation_date):
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal date: %r vs. %r' % (
+ routing_date, documentation_date)
+ )
+ if routing_version != documentation_version:
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal version: %r vs. %r' % (
+ routing_version, documentation_version)
+ )
+
+ # In the future we should error if ANSIBLE_METADATA exists in a collection
+
+ return doc_info, doc
+
+ def _check_version_added(self, doc, existing_doc):
+ version_added_raw = doc.get('version_added')
+ try:
+ collection_name = doc.get('version_added_collection')
+ version_added = self._create_strict_version(
+ str(version_added_raw or '0.0'),
+ collection_name=collection_name)
+ except ValueError as e:
+ version_added = version_added_raw or '0.0'
+ if self._is_new_module() or version_added != 'historical':
+ # already reported during schema validation, except:
+ if version_added == 'historical':
+ self.reporter.error(
+ path=self.object_path,
+ code='module-invalid-version-added',
+ msg='version_added is not a valid version number: %r. Error: %s' % (version_added, e)
+ )
+ return
+
+ if existing_doc and str(version_added_raw) != str(existing_doc.get('version_added')):
+ self.reporter.error(
+ path=self.object_path,
+ code='module-incorrect-version-added',
+ msg='version_added should be %r. Currently %r' % (existing_doc.get('version_added'), version_added_raw)
+ )
+
+ if not self._is_new_module():
+ return
+
+ should_be = '.'.join(ansible_version.split('.')[:2])
+ strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
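+ # e.g. an ansible_version of '2.14.3' yields should_be '2.14'; a new
+ # module's version_added must match the current major.minor release.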
+
+ if (version_added < strict_ansible_version or
+ strict_ansible_version < version_added):
+ self.reporter.error(
+ path=self.object_path,
+ code='module-incorrect-version-added',
+ msg='version_added should be %r. Currently %r' % (should_be, version_added_raw)
+ )
+
+ def _validate_ansible_module_call(self, docs):
+ try:
+ spec, kwargs = get_argument_spec(self.path, self.collection)
+ except AnsibleModuleNotInitialized:
+ self.reporter.error(
+ path=self.object_path,
+ code='ansible-module-not-initialized',
+ msg="Execution of the module did not result in initialization of AnsibleModule",
+ )
+ return
+ except AnsibleModuleImportError as e:
+ self.reporter.error(
+ path=self.object_path,
+ code='import-error',
+ msg="Exception attempting to import module for argument_spec introspection, '%s'" % e
+ )
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=traceback.format_exc()
+ )
+ return
+
+ schema = ansible_module_kwargs_schema(self.object_name.split('.')[0], for_collection=bool(self.collection))
+ self._validate_docs_schema(kwargs, schema, 'AnsibleModule', 'invalid-ansiblemodule-schema')
+
+ self._validate_argument_spec(docs, spec, kwargs)
+
+ def _validate_list_of_module_args(self, name, terms, spec, context):
+ if terms is None:
+ return
+ if not isinstance(terms, (list, tuple)):
+ # This is already reported by schema checking
+ return
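+ # Illustrative shape (hypothetical names): each entry is itself a list/tuple
+ # of argument names, e.g. mutually_exclusive=[['password', 'ssh_key_file']].
+ # The checks below flag non-string members, repeated names, and names that
+ # are not in the argument_spec.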
+ for check in terms:
+ if not isinstance(check, (list, tuple)):
+ # This is already reported by schema checking
+ continue
+ bad_term = False
+ for term in check:
+ if not isinstance(term, string_types):
+ msg = name
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must contain strings in the lists or tuples; found value %r" % (term, )
+ self.reporter.error(
+ path=self.object_path,
+ code=name + '-type',
+ msg=msg,
+ )
+ bad_term = True
+ if bad_term:
+ continue
+ if len(set(check)) != len(check):
+ msg = name
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has repeated terms"
+ self.reporter.error(
+ path=self.object_path,
+ code=name + '-collision',
+ msg=msg,
+ )
+ if not set(check) <= set(spec):
+ msg = name
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(check).difference(set(spec))))
+ self.reporter.error(
+ path=self.object_path,
+ code=name + '-unknown',
+ msg=msg,
+ )
+
+ def _validate_required_if(self, terms, spec, context, module):
+ if terms is None:
+ return
+ if not isinstance(terms, (list, tuple)):
+ # This is already reported by schema checking
+ return
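+ # Each entry has the shape (key, value, requirements[, is_one_of]), e.g.
+ # required_if=[('state', 'present', ('path',), False)] (hypothetical names).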
+ for check in terms:
+ if not isinstance(check, (list, tuple)) or len(check) not in [3, 4]:
+ # This is already reported by schema checking
+ continue
+ if len(check) == 4 and not isinstance(check[3], bool):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have forth value omitted or of type bool; got %r" % (check[3], )
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-is_one_of-type',
+ msg=msg,
+ )
+ requirements = check[2]
+ if not isinstance(requirements, (list, tuple)):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have third value (requirements) being a list or tuple; got type %r" % (requirements, )
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-type',
+ msg=msg,
+ )
+ continue
+ bad_term = False
+ for term in requirements:
+ if not isinstance(term, string_types):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have only strings in third value (requirements); got %r" % (term, )
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-type',
+ msg=msg,
+ )
+ bad_term = True
+ if bad_term:
+ continue
+ if len(set(requirements)) != len(requirements):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has repeated terms in requirements"
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-collision',
+ msg=msg,
+ )
+ if not set(requirements) <= set(spec):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains terms in requirements which are not part of argument_spec: %s" % ", ".join(sorted(set(requirements).difference(set(spec))))
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-unknown',
+ msg=msg,
+ )
+ key = check[0]
+ if key not in spec:
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have its key %s in argument_spec" % key
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-unknown-key',
+ msg=msg,
+ )
+ continue
+ if key in requirements:
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains its key %s in requirements" % key
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-key-in-requirements',
+ msg=msg,
+ )
+ value = check[1]
+ if value is not None:
+ _type = spec[key].get('type', 'str')
+ if callable(_type):
+ _type_checker = _type
+ else:
+ _type_checker = DEFAULT_TYPE_VALIDATORS.get(_type)
+ try:
+ with CaptureStd():
+ dummy = _type_checker(value)
+ except (Exception, SystemExit):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has value %r which does not fit to %s's parameter type %r" % (value, key, _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-value-type',
+ msg=msg,
+ )
+
+ def _validate_required_by(self, terms, spec, context):
+ if terms is None:
+ return
+ if not isinstance(terms, Mapping):
+ # This is already reported by schema checking
+ return
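+ # required_by maps an argument to the arguments it requires, e.g.
+ # required_by={'template_body': ['template_vars']} (hypothetical names).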
+ for key, value in terms.items():
+ if isinstance(value, string_types):
+ value = [value]
+ if not isinstance(value, (list, tuple)):
+ # This is already reported by schema checking
+ continue
+ for term in value:
+ if not isinstance(term, string_types):
+ # This is already reported by schema checking
+ continue
+ if len(set(value)) != len(value) or key in value:
+ msg = "required_by"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has repeated terms"
+ self.reporter.error(
+ path=self.object_path,
+ code='required_by-collision',
+ msg=msg,
+ )
+ if not set(value) <= set(spec) or key not in spec:
+ msg = "required_by"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(value).difference(set(spec))))
+ self.reporter.error(
+ path=self.object_path,
+ code='required_by-unknown',
+ msg=msg,
+ )
+
+ def _validate_argument_spec(self, docs, spec, kwargs, context=None, last_context_spec=None):
+ if not self.analyze_arg_spec:
+ return
+
+ if docs is None:
+ docs = {}
+
+ if context is None:
+ context = []
+
+ if last_context_spec is None:
+ last_context_spec = kwargs
+
+ try:
+ if not context:
+ add_fragments(docs, self.object_path, fragment_loader=fragment_loader,
+ is_module=self.plugin_type == 'module')
+ except Exception:
+ # Cannot merge fragments
+ return
+
+ # Use this to access type checkers later
+ module = NoArgsAnsibleModule({})
+
+ self._validate_list_of_module_args('mutually_exclusive', last_context_spec.get('mutually_exclusive'), spec, context)
+ self._validate_list_of_module_args('required_together', last_context_spec.get('required_together'), spec, context)
+ self._validate_list_of_module_args('required_one_of', last_context_spec.get('required_one_of'), spec, context)
+ self._validate_required_if(last_context_spec.get('required_if'), spec, context, module)
+ self._validate_required_by(last_context_spec.get('required_by'), spec, context)
+
+ provider_args = set()
+ args_from_argspec = set()
+ deprecated_args_from_argspec = set()
+ doc_options = docs.get('options', {})
+ if doc_options is None:
+ doc_options = {}
+ for arg, data in spec.items():
+ restricted_argument_names = ('message', 'syslog_facility')
+ if arg.lower() in restricted_argument_names:
+ msg = "Argument '%s' in argument_spec " % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += "must not be one of %s as it is used " \
+ "internally by Ansible Core Engine" % (",".join(restricted_argument_names))
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-name',
+ msg=msg,
+ )
+ continue
+ if 'aliases' in data:
+ for al in data['aliases']:
+ if al.lower() in restricted_argument_names:
+ msg = "Argument alias '%s' in argument_spec " % al
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += "must not be one of %s as it is used " \
+ "internally by Ansible Core Engine" % (",".join(restricted_argument_names))
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-name',
+ msg=msg,
+ )
+ continue
+
+ # Could this be a place where secrets are leaked?
+ # If it is type: path we know it's not a secret key as it's a file path.
+ # If it is type: bool it is more likely a flag indicating that something is secret, than an actual secret.
+ if all((
+ data.get('no_log') is None, is_potential_secret_option(arg),
+ data.get('type') not in ("path", "bool"), data.get('choices') is None,
+ )):
+ msg = "Argument '%s' in argument_spec could be a secret, though doesn't have `no_log` set" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ self.reporter.error(
+ path=self.object_path,
+ code='no-log-needed',
+ msg=msg,
+ )
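+ # e.g. a hypothetical str argument named 'api_token' with no choices and no
+ # explicit no_log would be flagged here; path and bool arguments are
+ # excluded above since they are unlikely to hold the secret itself.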
+
+ if not isinstance(data, dict):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must be a dictionary/hash when used"
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-spec',
+ msg=msg,
+ )
+ continue
+
+ removed_at_date = data.get('removed_at_date', None)
+ if removed_at_date is not None:
+ try:
+ if parse_isodate(removed_at_date, allow_date=False) < datetime.date.today():
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has a removed_at_date '%s' before today" % removed_at_date
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecated-date',
+ msg=msg,
+ )
+ except ValueError:
+ # This should only happen when removed_at_date is not in ISO format. Since schema
+ # validation already reported this as an error, don't report it a second time.
+ pass
+
+ deprecated_aliases = data.get('deprecated_aliases', None)
+ if deprecated_aliases is not None:
+ for deprecated_alias in deprecated_aliases:
+ if 'name' in deprecated_alias and 'date' in deprecated_alias:
+ try:
+ date = deprecated_alias['date']
+ if parse_isodate(date, allow_date=False) < datetime.date.today():
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with removal date '%s' before today" % (
+ deprecated_alias['name'], deprecated_alias['date'])
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecated-date',
+ msg=msg,
+ )
+ except ValueError:
+ # This should only happen when deprecated_alias['date'] is not in ISO format. Since
+ # schema validation already reported this as an error, don't report it a second
+ # time.
+ pass
+
+ has_version = False
+ if self.collection and self.collection_version is not None:
+ compare_version = self.collection_version
+ version_of_what = "this collection (%s)" % self.collection_version_str
+ code_prefix = 'collection'
+ has_version = True
+ elif not self.collection:
+ compare_version = LOOSE_ANSIBLE_VERSION
+ version_of_what = "Ansible (%s)" % ansible_version
+ code_prefix = 'ansible'
+ has_version = True
+
+ removed_in_version = data.get('removed_in_version', None)
+ if removed_in_version is not None:
+ try:
+ collection_name = data.get('removed_from_collection')
+ removed_in = self._create_version(str(removed_in_version), collection_name=collection_name)
+ if has_version and collection_name == self.collection_name and compare_version >= removed_in:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has a deprecated removed_in_version %r," % removed_in_version
+ msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code=code_prefix + '-deprecated-version',
+ msg=msg,
+ )
+ except ValueError as e:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has an invalid removed_in_version number %r: %s" % (removed_in_version, e)
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+ except TypeError:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has an invalid removed_in_version number %r: " % (removed_in_version, )
+ msg += " error while comparing to version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+
+ if deprecated_aliases is not None:
+ for deprecated_alias in deprecated_aliases:
+ if 'name' in deprecated_alias and 'version' in deprecated_alias:
+ try:
+ collection_name = deprecated_alias.get('collection_name')
+ version = self._create_version(str(deprecated_alias['version']), collection_name=collection_name)
+ if has_version and collection_name == self.collection_name and compare_version >= version:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with removal in version %r," % (
+ deprecated_alias['name'], deprecated_alias['version'])
+ msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code=code_prefix + '-deprecated-version',
+ msg=msg,
+ )
+ except ValueError as e:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with invalid removal version %r: %s" % (
+ deprecated_alias['name'], deprecated_alias['version'], e)
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+ except TypeError:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with invalid removal version %r:" % (
+ deprecated_alias['name'], deprecated_alias['version'])
+ msg += " error while comparing to version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+
+ aliases = data.get('aliases', [])
+ if arg in aliases:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is specified as its own alias"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-alias-self',
+ msg=msg
+ )
+ if len(aliases) > len(set(aliases)):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has at least one alias specified multiple times in aliases"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-alias-repeated',
+ msg=msg
+ )
+ if not context and arg == 'state':
+ bad_states = set(['list', 'info', 'get']) & set(data.get('choices', set()))
+ for bad_state in bad_states:
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-state-invalid-choice',
+ msg="Argument 'state' includes the value '%s' as a choice" % bad_state)
+ if not data.get('removed_in_version', None) and not data.get('removed_at_date', None):
+ args_from_argspec.add(arg)
+ args_from_argspec.update(aliases)
+ else:
+ deprecated_args_from_argspec.add(arg)
+ deprecated_args_from_argspec.update(aliases)
+ if arg == 'provider' and self.object_path.startswith('lib/ansible/modules/network/'):
+ if data.get('options') is not None and not isinstance(data.get('options'), Mapping):
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-spec-options',
+ msg="Argument 'options' in argument_spec['provider'] must be a dictionary/hash when used",
+ )
+ elif data.get('options'):
+ # Record provider options from network modules, for later comparison
+ for provider_arg, provider_data in data.get('options', {}).items():
+ provider_args.add(provider_arg)
+ provider_args.update(provider_data.get('aliases', []))
+
+ if data.get('required') and data.get('default', object) != object:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is marked as required but specifies a default. Arguments with a" \
+ " default should not be marked as required"
+ self.reporter.error(
+ path=self.object_path,
+ code='no-default-for-required-parameter',
+ msg=msg
+ )
+
+ if arg in provider_args:
+ # Provider args are being removed from network module top level
+ # don't validate docs<->arg_spec checks below
+ continue
+
+ _type = data.get('type', 'str')
+ if callable(_type):
+ _type_checker = _type
+ else:
+ _type_checker = DEFAULT_TYPE_VALIDATORS.get(_type)
+
+ _elements = data.get('elements')
+ if (_type == 'list') and not _elements:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines type as list but elements is not defined"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-list-no-elements',
+ msg=msg
+ )
+ if _elements:
+ if not callable(_elements):
+ DEFAULT_TYPE_VALIDATORS.get(_elements)
+ if _type != 'list':
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines elements as %s but it is valid only when value of parameter type is list" % _elements
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-invalid-elements',
+ msg=msg
+ )
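+ # e.g. a well-formed list argument looks like dict(type='list', elements='str');
+ # 'elements' on a non-list type is flagged above.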
+
+ arg_default = None
+ if 'default' in data and not is_empty(data['default']):
+ try:
+ with CaptureStd():
+ arg_default = _type_checker(data['default'])
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines default as (%r) but this is incompatible with parameter type %r" % (data['default'], _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='incompatible-default-type',
+ msg=msg
+ )
+ continue
+
+ doc_options_args = []
+ for alias in sorted(set([arg] + list(aliases))):
+ if alias in doc_options:
+ doc_options_args.append(alias)
+ if len(doc_options_args) == 0:
+ # Undocumented arguments will be handled later (search for undocumented-parameter)
+ doc_options_arg = {}
+ else:
+ doc_options_arg = doc_options[doc_options_args[0]]
+ if len(doc_options_args) > 1:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " with aliases %s is documented multiple times, namely as %s" % (
+ ", ".join([("'%s'" % alias) for alias in aliases]),
+ ", ".join([("'%s'" % alias) for alias in doc_options_args])
+ )
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-documented-multiple-times',
+ msg=msg
+ )
+
+ try:
+ doc_default = None
+ if 'default' in doc_options_arg and not is_empty(doc_options_arg['default']):
+ with CaptureStd():
+ doc_default = _type_checker(doc_options_arg['default'])
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in documentation" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines default as (%r) but this is incompatible with parameter type %r" % (doc_options_arg.get('default'), _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-default-incompatible-type',
+ msg=msg
+ )
+ continue
+
+ if arg_default != doc_default:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines default as (%r) but documentation defines default as (%r)" % (arg_default, doc_default)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-default-does-not-match-spec',
+ msg=msg
+ )
+
+ doc_type = doc_options_arg.get('type')
+ if 'type' in data and data['type'] is not None:
+ if doc_type is None:
+ if not arg.startswith('_'): # hidden parameter, for example _raw_params
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines type as %r but documentation doesn't define type" % (data['type'])
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-type-not-in-doc',
+ msg=msg
+ )
+ elif data['type'] != doc_type:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines type as %r but documentation defines type as %r" % (data['type'], doc_type)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-type-does-not-match-spec',
+ msg=msg
+ )
+ else:
+ if doc_type is None:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " uses default type ('str') but documentation doesn't define type"
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-missing-type',
+ msg=msg
+ )
+ elif doc_type != 'str':
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " implies type as 'str' but documentation defines as %r" % doc_type
+ self.reporter.error(
+ path=self.object_path,
+ code='implied-parameter-type-mismatch',
+ msg=msg
+ )
+
+ doc_choices = []
+ try:
+ for choice in doc_options_arg.get('choices', []):
+ try:
+ with CaptureStd():
+ doc_choices.append(_type_checker(choice))
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in documentation" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-choices-incompatible-type',
+ msg=msg
+ )
+ raise StopIteration()
+ except StopIteration:
+ continue
+
+ arg_choices = []
+ try:
+ for choice in data.get('choices', []):
+ try:
+ with CaptureStd():
+ arg_choices.append(_type_checker(choice))
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='incompatible-choices',
+ msg=msg
+ )
+ raise StopIteration()
+ except StopIteration:
+ continue
+
+ if not compare_unordered_lists(arg_choices, doc_choices):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines choices as (%r) but documentation defines choices as (%r)" % (arg_choices, doc_choices)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-choices-do-not-match-spec',
+ msg=msg
+ )
+
+ doc_required = doc_options_arg.get('required', False)
+ data_required = data.get('required', False)
+ if (doc_required or data_required) and not (doc_required and data_required):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ if doc_required:
+ msg += " is not required, but is documented as being required"
+ else:
+ msg += " is required, but is not documented as being required"
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-required-mismatch',
+ msg=msg
+ )
+
+ doc_elements = doc_options_arg.get('elements', None)
+ doc_type = doc_options_arg.get('type', 'str')
+ data_elements = data.get('elements', None)
+ if (doc_elements or data_elements) and not (doc_elements == data_elements):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ if data_elements:
+ msg += " specifies elements as %s," % data_elements
+ else:
+ msg += " does not specify elements,"
+ if doc_elements:
+ msg += " but elements is documented as being %s" % doc_elements
+ else:
+ msg += " but elements is not documented"
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-elements-mismatch',
+ msg=msg
+ )
+
+ spec_suboptions = data.get('options')
+ doc_suboptions = doc_options_arg.get('suboptions', {})
+ if spec_suboptions:
+ if not doc_suboptions:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has sub-options but documentation does not define it"
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-suboption-docs',
+ msg=msg
+ )
+ self._validate_argument_spec({'options': doc_suboptions}, spec_suboptions, kwargs,
+ context=context + [arg], last_context_spec=data)
+
+ for arg in args_from_argspec:
+ if not str(arg).isidentifier():
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is not a valid python identifier"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-invalid',
+ msg=msg
+ )
+
+ if docs:
+ args_from_docs = set()
+ for arg, data in doc_options.items():
+ args_from_docs.add(arg)
+ args_from_docs.update(data.get('aliases', []))
+
+ args_missing_from_docs = args_from_argspec.difference(args_from_docs)
+ docs_missing_from_args = args_from_docs.difference(args_from_argspec | deprecated_args_from_argspec)
+ for arg in args_missing_from_docs:
+ if arg in provider_args:
+ # Provider args are being removed from network module top level
+ # So they are likely not documented on purpose
+ continue
+ msg = "Argument '%s'" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is listed in the argument_spec, but not documented in the module documentation"
+ self.reporter.error(
+ path=self.object_path,
+ code='undocumented-parameter',
+ msg=msg
+ )
+ for arg in docs_missing_from_args:
+ msg = "Argument '%s'" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is listed in DOCUMENTATION.options, but not accepted by the module argument_spec"
+ self.reporter.error(
+ path=self.object_path,
+ code='nonexistent-parameter-documented',
+ msg=msg
+ )
+
+ def _check_for_new_args(self, doc):
+ if not self.base_branch or self._is_new_module():
+ return
+
+ with CaptureStd():
+ try:
+ existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(
+ self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name,
+ is_module=self.plugin_type == 'module')
+ existing_options = existing_doc.get('options', {}) or {}
+ except AssertionError:
+ fragment = doc['extends_documentation_fragment']
+ self.reporter.warning(
+ path=self.object_path,
+ code='missing-existing-doc-fragment',
+ msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment
+ )
+ return
+ except Exception as e:
+ self.reporter.warning_trace(
+ path=self.object_path,
+ tracebk=e
+ )
+ self.reporter.warning(
+ path=self.object_path,
+ code='unknown-doc-fragment',
+ msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need updating')
+ )
+ return
+
+ try:
+ mod_collection_name = existing_doc.get('version_added_collection')
+ mod_version_added = self._create_strict_version(
+ str(existing_doc.get('version_added', '0.0')),
+ collection_name=mod_collection_name)
+ except ValueError:
+ mod_collection_name = self.collection_name
+ mod_version_added = self._create_strict_version('0.0')
+
+ options = doc.get('options', {}) or {}
+
+ should_be = '.'.join(ansible_version.split('.')[:2])
+ strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
+
+ for option, details in options.items():
+ try:
+ names = [option] + details.get('aliases', [])
+ except (TypeError, AttributeError):
+ # Reporting of this syntax error will be handled by schema validation.
+ continue
+
+ if any(name in existing_options for name in names):
+ # The option already existed. Make sure version_added didn't change.
+ for name in names:
+ existing_collection_name = existing_options.get(name, {}).get('version_added_collection')
+ existing_version = existing_options.get(name, {}).get('version_added')
+ if existing_version:
+ break
+ current_collection_name = details.get('version_added_collection')
+ current_version = details.get('version_added')
+ if current_collection_name != existing_collection_name:
+ self.reporter.error(
+ path=self.object_path,
+ code='option-incorrect-version-added-collection',
+ msg=('version_added for existing option (%s) should '
+ 'belong to collection %r. Currently belongs to %r' %
+ (option, current_collection_name, existing_collection_name))
+ )
+ elif str(current_version) != str(existing_version):
+ self.reporter.error(
+ path=self.object_path,
+ code='option-incorrect-version-added',
+ msg=('version_added for existing option (%s) should '
+ 'be %r. Currently %r' %
+ (option, existing_version, current_version))
+ )
+ continue
+
+ try:
+ collection_name = details.get('version_added_collection')
+ version_added = self._create_strict_version(
+ str(details.get('version_added', '0.0')),
+ collection_name=collection_name)
+ except ValueError as e:
+ # already reported during schema validation
+ continue
+
+ builtin = self.collection_name == 'ansible.builtin' and collection_name in ('ansible.builtin', None)
+ if not builtin and collection_name != self.collection_name:
+ continue
+ if (strict_ansible_version != mod_version_added and
+ (version_added < strict_ansible_version or
+ strict_ansible_version < version_added)):
+ self.reporter.error(
+ path=self.object_path,
+ code='option-incorrect-version-added',
+ msg=('version_added for new option (%s) should '
+ 'be %r. Currently %r' %
+ (option, should_be, version_added))
+ )
+
+ return existing_doc
+
+ @staticmethod
+ def is_on_rejectlist(path):
+ base_name = os.path.basename(path)
+ file_name = os.path.splitext(base_name)[0]
+
+ if file_name.startswith('_') and os.path.islink(path):
+ return True
+
+ if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.REJECTLIST):
+ return True
+
+ for pat in ModuleValidator.REJECTLIST_PATTERNS:
+ if fnmatch(base_name, pat):
+ return True
+
+ return False
+
+ def validate(self):
+ super(ModuleValidator, self).validate()
+ if not self._python_module() and not self._powershell_module() and not self._sidecar_doc():
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-extension',
+ msg=('Official Ansible modules must have a .py '
+ 'extension for Python modules or a .ps1 '
+ 'for PowerShell modules')
+ )
+ self._python_module_override = True
+
+ if self._python_module() and self.ast is None:
+ self.reporter.error(
+ path=self.object_path,
+ code='python-syntax-error',
+ msg='Python SyntaxError while parsing module'
+ )
+ try:
+ compile(self.text, self.path, 'exec')
+ except Exception:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=traceback.format_exc()
+ )
+ return
+
+ end_of_deprecation_should_be_removed_only = False
+ doc_info = None
+ if self._python_module() or self._sidecar_doc():
+ doc_info, docs = self._validate_docs()
+
+ # See if current version >= deprecated.removed_in, i.e. the module should be docs-only
+ if docs and docs.get('deprecated', False):
+
+ if 'removed_in' in docs['deprecated']:
+ removed_in = None
+ collection_name = docs['deprecated'].get('removed_from_collection')
+ version = docs['deprecated']['removed_in']
+ if collection_name != self.collection_name:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-module-deprecation-source',
+ msg=('The deprecation version for a module must be added in this collection')
+ )
+ else:
+ try:
+ removed_in = self._create_strict_version(str(version), collection_name=collection_name)
+ except ValueError as e:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-module-deprecation-version',
+ msg=('The deprecation version %r cannot be parsed: %s' % (version, e))
+ )
+
+ if removed_in:
+ if not self.collection:
+ strict_ansible_version = self._create_strict_version(
+ '.'.join(ansible_version.split('.')[:2]), self.collection_name)
+ end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
+
+ if end_of_deprecation_should_be_removed_only:
+ self.reporter.error(
+ path=self.object_path,
+ code='ansible-deprecated-module',
+ msg='Module is marked for removal in version %s of Ansible when the current version is %s' % (
+ version, ansible_version),
+ )
+ elif self.collection_version:
+ strict_ansible_version = self.collection_version
+ end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
+
+ if end_of_deprecation_should_be_removed_only:
+ self.reporter.error(
+ path=self.object_path,
+ code='collection-deprecated-module',
+ msg='Module is marked for removal in version %s of this collection when the current version is %s' % (
+ version, self.collection_version_str),
+ )
+
+ # handle deprecation by date
+ if 'removed_at_date' in docs['deprecated']:
+ try:
+ removed_at_date = docs['deprecated']['removed_at_date']
+ if parse_isodate(removed_at_date, allow_date=True) < datetime.date.today():
+ msg = "Module's deprecated.removed_at_date date '%s' is before today" % removed_at_date
+ self.reporter.error(path=self.object_path, code='deprecated-date', msg=msg)
+ except ValueError:
+ # This happens if the date cannot be parsed. This is already checked by the schema.
+ pass
+
+ if self._python_module() and not self._just_docs() and not end_of_deprecation_should_be_removed_only:
+ if self.plugin_type == 'module':
+ self._validate_ansible_module_call(docs)
+ self._check_for_sys_exit()
+ self._find_rejectlist_imports()
+ if self.plugin_type == 'module':
+ self._find_module_utils()
+ self._find_has_import()
+
+ if doc_info:
+ first_callable = self._get_first_callable() or 1000000 # use a bogus "high" line number if no callable exists
+ self._ensure_imports_below_docs(doc_info, first_callable)
+
+ if self.plugin_type == 'module':
+ self._check_for_subprocess()
+ self._check_for_os_call()
+
+ if self._powershell_module():
+ self._validate_ps_replacers()
+ docs_path = self._find_ps_docs_file()
+
+ # We can only validate PowerShell arg spec if it is using the new Ansible.Basic.AnsibleModule util
+ pattern = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*Ansible\.Basic'
+ if re.search(pattern, self.text) and self.object_name not in self.PS_ARG_VALIDATE_REJECTLIST:
+ with ModuleValidator(docs_path, base_branch=self.base_branch, git_cache=self.git_cache) as docs_mv:
+ docs = docs_mv._validate_docs()[1]
+ self._validate_ansible_module_call(docs)
+
+ self._check_gpl3_header()
+ if not self._just_docs() and not self._sidecar_doc() and not end_of_deprecation_should_be_removed_only:
+ if self.plugin_type == 'module':
+ self._check_interpreter(powershell=self._powershell_module())
+ self._check_type_instead_of_isinstance(
+ powershell=self._powershell_module()
+ )
+
+
+class PythonPackageValidator(Validator):
+ REJECTLIST_FILES = frozenset(('__pycache__',))
+
+ def __init__(self, path, reporter=None):
+ super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter())
+
+ self.path = path
+ self.basename = os.path.basename(path)
+
+ @property
+ def object_name(self):
+ return self.basename
+
+ @property
+ def object_path(self):
+ return self.path
+
+ def validate(self):
+ super(PythonPackageValidator, self).validate()
+
+ if self.basename in self.REJECTLIST_FILES:
+ return
+
+ init_file = os.path.join(self.path, '__init__.py')
+ if not os.path.exists(init_file):
+ self.reporter.error(
+ path=self.object_path,
+ code='subdirectory-missing-init',
+ msg='Ansible module subdirectories must contain an __init__.py'
+ )
+
+
+def re_compile(value):
+ """
+ Argparse expects things to raise TypeError, but re.compile raises a
+ re.error exception.
+
+ This function is a shorthand to convert the re.error exception into a
+ TypeError.
+ """
+
+ try:
+ return re.compile(value)
+ except re.error as e:
+ raise TypeError(e)
+
+
+def run():
+ parser = argparse.ArgumentParser(prog="validate-modules")
+ parser.add_argument('plugins', nargs='+',
+ help='Path to module/plugin or module/plugin directory')
+ parser.add_argument('-w', '--warnings', help='Show warnings',
+ action='store_true')
+ parser.add_argument('--exclude', help='RegEx exclusion pattern',
+ type=re_compile)
+ parser.add_argument('--arg-spec', help='Analyze module argument spec',
+ action='store_true', default=False)
+ parser.add_argument('--base-branch', default=None,
+ help='Used in determining if new options were added')
+ parser.add_argument('--format', choices=['json', 'plain'], default='plain',
+ help='Output format. Default: "%(default)s"')
+ parser.add_argument('--output', default='-',
+ help='Output location, use "-" for stdout. '
+ 'Default "%(default)s"')
+ parser.add_argument('--collection',
+ help='Specifies the path to the collection, when '
+ 'validating files within a collection. Ensure '
+ 'that ANSIBLE_COLLECTIONS_PATH is set so the '
+ 'contents of the collection can be located')
+ parser.add_argument('--collection-version',
+ help='The collection\'s version number used to check '
+ 'deprecations')
+ parser.add_argument('--plugin-type',
+ default='module',
+ help='The plugin type to validate. Defaults to %(default)s')
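+ # Hypothetical invocation (this tool is normally driven by ansible-test):
+ #   validate-modules --arg-spec --format json lib/ansible/modules/ping.py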
+
+ args = parser.parse_args()
+
+ args.plugins = [m.rstrip('/') for m in args.plugins]
+
+ reporter = Reporter()
+ git_cache = GitCache(args.base_branch, args.plugin_type)
+
+ check_dirs = set()
+
+ routing = None
+ if args.collection:
+ routing_file = 'meta/runtime.yml'
+ # Load meta/runtime.yml if it exists, as it may contain deprecation information
+ if os.path.isfile(routing_file):
+ try:
+ with open(routing_file) as f:
+ routing = yaml.safe_load(f)
+ except yaml.error.MarkedYAMLError as ex:
+ print('%s:%d:%d: YAML load failed: %s' % (routing_file, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
+ except Exception as ex: # pylint: disable=broad-except
+ print('%s:%d:%d: YAML load failed: %s' % (routing_file, 0, 0, re.sub(r'\s+', ' ', str(ex))))
+
+ for plugin in args.plugins:
+ if os.path.isfile(plugin):
+ path = plugin
+ if args.exclude and args.exclude.search(path):
+ continue
+ if ModuleValidator.is_on_rejectlist(path):
+ continue
+ with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
+ analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
+ git_cache=git_cache, reporter=reporter, routing=routing,
+ plugin_type=args.plugin_type) as mv1:
+ mv1.validate()
+ check_dirs.add(os.path.dirname(path))
+
+ for root, dirs, files in os.walk(plugin):
+ basedir = root[len(plugin) + 1:].split('/', 1)[0]
+ if basedir in REJECTLIST_DIRS:
+ continue
+ for dirname in dirs:
+ if root == plugin and dirname in REJECTLIST_DIRS:
+ continue
+ path = os.path.join(root, dirname)
+ if args.exclude and args.exclude.search(path):
+ continue
+ check_dirs.add(path)
+
+ for filename in files:
+ path = os.path.join(root, filename)
+ if args.exclude and args.exclude.search(path):
+ continue
+ if ModuleValidator.is_on_rejectlist(path):
+ continue
+ with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
+ analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
+ git_cache=git_cache, reporter=reporter, routing=routing,
+ plugin_type=args.plugin_type) as mv2:
+ mv2.validate()
+
+ if not args.collection and args.plugin_type == 'module':
+ for path in sorted(check_dirs):
+ pv = PythonPackageValidator(path, reporter=reporter)
+ pv.validate()
+
+ if args.format == 'plain':
+ sys.exit(reporter.plain(warnings=args.warnings, output=args.output))
+ else:
+ sys.exit(reporter.json(warnings=args.warnings, output=args.output))
+
+
+class GitCache:
+ def __init__(self, base_branch, plugin_type):
+ self.base_branch = base_branch
+ self.plugin_type = plugin_type
+
+ self.rel_path = 'lib/ansible/modules/'
+ if plugin_type != 'module':
+ self.rel_path = 'lib/ansible/plugins/%s/' % plugin_type
+
+ if self.base_branch:
+ self.base_tree = self._git(['ls-tree', '-r', '--name-only', self.base_branch, self.rel_path])
+ else:
+ self.base_tree = []
+
+ try:
+ self.head_tree = self._git(['ls-tree', '-r', '--name-only', 'HEAD', self.rel_path])
+ except GitError as ex:
+ if ex.status == 128:
+ # fallback when there is no .git directory
+ self.head_tree = self._get_module_files()
+ else:
+ raise
+ except FileNotFoundError:
+ # fallback when git is not installed
+ self.head_tree = self._get_module_files()
+
+ allowed_exts = ('.py', '.ps1')
+ if plugin_type != 'module':
+ allowed_exts = ('.py', )
+ self.base_module_paths = dict((os.path.basename(p), p) for p in self.base_tree if os.path.splitext(p)[1] in allowed_exts)
+
+ self.base_module_paths.pop('__init__.py', None)
+
+ self.head_aliased_modules = set()
+
+ for path in self.head_tree:
+ filename = os.path.basename(path)
+
+ if filename.startswith('_') and filename != '__init__.py':
+ if os.path.islink(path):
+ self.head_aliased_modules.add(os.path.basename(os.path.realpath(path)))
+
+ def _get_module_files(self):
+ module_files = []
+
+ for (dir_path, dir_names, file_names) in os.walk(self.rel_path):
+ for file_name in file_names:
+ module_files.append(os.path.join(dir_path, file_name))
+
+ return module_files
+
+ @staticmethod
+ def _git(args):
+ cmd = ['git'] + args
+ p = subprocess.run(cmd, stdin=subprocess.DEVNULL, capture_output=True, text=True, check=False)
+
+ if p.returncode != 0:
+ raise GitError(p.stderr, p.returncode)
+
+ return p.stdout.splitlines()
+
+
+class GitError(Exception):
+ def __init__(self, message, status):
+ super(GitError, self).__init__(message)
+
+ self.status = status
+
+
+def main():
+ try:
+ run()
+ except KeyboardInterrupt:
+ pass
diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/module_args.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/module_args.py
new file mode 100644
index 0000000..03a1401
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/module_args.py
@@ -0,0 +1,176 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2016 Matt Martz <matt@sivel.net>
+# Copyright (C) 2016 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import annotations
+
+import runpy
+import inspect
+import json
+import os
+import subprocess
+import sys
+
+from contextlib import contextmanager
+
+from ansible.executor.powershell.module_manifest import PSModuleDepFinder
+from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS, AnsibleModule
+from ansible.module_utils.six import reraise
+from ansible.module_utils._text import to_bytes, to_text
+
+from .utils import CaptureStd, find_executable, get_module_name_from_filename
+
+
+ANSIBLE_MODULE_CONSTRUCTOR_ARGS = tuple(list(inspect.signature(AnsibleModule.__init__).parameters)[1:])
+
+
+class AnsibleModuleCallError(RuntimeError):
+ pass
+
+
+class AnsibleModuleImportError(ImportError):
+ pass
+
+
+class AnsibleModuleNotInitialized(Exception):
+ pass
+
+
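+# Stand-in for AnsibleModule.__init__: setup_env() below swaps it in, so that
+# running a module records the constructor args and kwargs and then aborts via
+# AnsibleModuleCallError before the module does any real work.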
+class _FakeAnsibleModuleInit:
+ def __init__(self):
+ self.args = tuple()
+ self.kwargs = {}
+ self.called = False
+
+ def __call__(self, *args, **kwargs):
+ if args and isinstance(args[0], AnsibleModule):
+ # Make sure, due to creative calling, that we didn't end up with
+ # ``self`` in ``args``
+ self.args = args[1:]
+ else:
+ self.args = args
+ self.kwargs = kwargs
+ self.called = True
+ raise AnsibleModuleCallError('AnsibleModuleCallError')
+
+
+def _fake_load_params():
+ pass
+
+
+@contextmanager
+def setup_env(filename):
+ # Used to clean up imports later
+ pre_sys_modules = list(sys.modules.keys())
+
+ fake = _FakeAnsibleModuleInit()
+ module = __import__('ansible.module_utils.basic').module_utils.basic
+ _original_init = module.AnsibleModule.__init__
+ _original_load_params = module._load_params
+ setattr(module.AnsibleModule, '__init__', fake)
+ setattr(module, '_load_params', _fake_load_params)
+
+ try:
+ yield fake
+ finally:
+ setattr(module.AnsibleModule, '__init__', _original_init)
+ setattr(module, '_load_params', _original_load_params)
+
+ # Clean up imports to prevent issues with mutable data being used in modules
+ for k in list(sys.modules.keys()):
+ # It's faster if we limit to items in ansible.module_utils
+ # But if this causes problems later, we should remove it
+ if k not in pre_sys_modules and k.startswith('ansible.module_utils.'):
+ del sys.modules[k]
+
+
+def get_ps_argument_spec(filename, collection):
+ fqc_name = get_module_name_from_filename(filename, collection)
+
+ pwsh = find_executable('pwsh')
+ if not pwsh:
+ raise FileNotFoundError('Required program for PowerShell arg spec inspection "pwsh" not found.')
+
+ module_path = os.path.join(os.getcwd(), filename)
+ b_module_path = to_bytes(module_path, errors='surrogate_or_strict')
+ with open(b_module_path, mode='rb') as module_fd:
+ b_module_data = module_fd.read()
+
+ ps_dep_finder = PSModuleDepFinder()
+ ps_dep_finder.scan_module(b_module_data, fqn=fqc_name)
+
+ # For ps_argspec.ps1 to compile Ansible.Basic it also needs the AddType module_util.
+ ps_dep_finder._add_module(name=b"Ansible.ModuleUtils.AddType", ext=".psm1", fqn=None, optional=False, wrapper=False)
+
+ util_manifest = json.dumps({
+ 'module_path': to_text(module_path, errors='surrogate_or_strict'),
+ 'ansible_basic': ps_dep_finder.cs_utils_module["Ansible.Basic"]['path'],
+ 'ps_utils': {name: info['path'] for name, info in ps_dep_finder.ps_modules.items()}
+ })
+
+ script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ps_argspec.ps1')
+ proc = subprocess.run(['pwsh', script_path, util_manifest], stdin=subprocess.DEVNULL, capture_output=True, text=True, check=False)
+
+ if proc.returncode != 0:
+ raise AnsibleModuleImportError("STDOUT:\n%s\nSTDERR:\n%s" % (proc.stdout, proc.stderr))
+
+ kwargs = json.loads(proc.stdout)
+
+ # the validate-modules code expects the options spec to be under the argument_spec key not options as set in PS
+ kwargs['argument_spec'] = kwargs.pop('options', {})
+
+ return kwargs['argument_spec'], kwargs
+
+
+def get_py_argument_spec(filename, collection):
+ name = get_module_name_from_filename(filename, collection)
+
+ with setup_env(filename) as fake:
+ try:
+ with CaptureStd():
+ runpy.run_module(name, run_name='__main__', alter_sys=True)
+ except AnsibleModuleCallError:
+ pass
+ except BaseException as e:
+ # we want to catch all exceptions here, including sys.exit
+ reraise(AnsibleModuleImportError, AnsibleModuleImportError('%s' % e), sys.exc_info()[2])
+
+ if not fake.called:
+ raise AnsibleModuleNotInitialized()
+
+ try:
+ # Convert positional arguments to kwargs to make sure that all parameters are actually checked
+ for arg, arg_name in zip(fake.args, ANSIBLE_MODULE_CONSTRUCTOR_ARGS):
+ fake.kwargs[arg_name] = arg
+ # for ping kwargs == {'argument_spec':{'data':{'type':'str','default':'pong'}}, 'supports_check_mode':True}
+ argument_spec = fake.kwargs.get('argument_spec') or {}
+ # If add_file_common_args is truthy, add options from FILE_COMMON_ARGUMENTS when not present.
+ # This is the only modification to argument_spec done by AnsibleModule itself, and it is
+ # not caught by setup_env's AnsibleModule replacement.
+ if fake.kwargs.get('add_file_common_args'):
+ for k, v in FILE_COMMON_ARGUMENTS.items():
+ if k not in argument_spec:
+ argument_spec[k] = v
+ return argument_spec, fake.kwargs
+ except (TypeError, IndexError):
+ return {}, {}
+
+
+def get_argument_spec(filename, collection):
+ if filename.endswith('.py'):
+ return get_py_argument_spec(filename, collection)
+ else:
+ return get_ps_argument_spec(filename, collection)
diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/ps_argspec.ps1 b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/ps_argspec.ps1
new file mode 100644
index 0000000..4183b2b
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/ps_argspec.ps1
@@ -0,0 +1,121 @@
+#Requires -Version 6
+
+Set-StrictMode -Version 2.0
+$ErrorActionPreference = "Stop"
+$WarningPreference = "Stop"
+
+Function Resolve-CircularReference {
+ <#
+ .SYNOPSIS
+ Removes known types that cause a circular reference in their json serialization.
+
+ .PARAMETER Hash
+ The hash to scan for circular references
+ #>
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory = $true)]
+ [System.Collections.IDictionary]
+ $Hash
+ )
+
+ foreach ($key in [String[]]$Hash.Keys) {
+ $value = $Hash[$key]
+ if ($value -is [System.Collections.IDictionary]) {
+ Resolve-CircularReference -Hash $value
+ }
+ elseif ($value -is [Array] -or $value -is [System.Collections.IList]) {
+ $values = @(foreach ($v in $value) {
+ if ($v -is [System.Collections.IDictionary]) {
+ Resolve-CircularReference -Hash $v
+ }
+ , $v
+ })
+ $Hash[$key] = $values
+ }
+ elseif ($value -is [DateTime]) {
+ $Hash[$key] = $value.ToString("yyyy-MM-dd")
+ }
+ elseif ($value -is [delegate]) {
+ # Type can be set to a delegate function which defines its own type. For the documentation we just
+ # reflect that as 'raw'.
+ if ($key -eq 'type') {
+ $Hash[$key] = 'raw'
+ }
+ else {
+ $Hash[$key] = $value.ToString() # Shouldn't ever happen but just in case.
+ }
+ }
+ }
+}
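+# Illustrative use (hypothetical $spec): after this pass a hashtable holding a
+# [DateTime] or a delegate 'type' callback serializes cleanly, e.g.
+#   Resolve-CircularReference -Hash $spec
+#   ConvertTo-Json -InputObject $spec -Depth 99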
+
+$manifest = ConvertFrom-Json -InputObject $args[0] -AsHashtable
+if (-not $manifest.Contains('module_path') -or -not $manifest.module_path) {
+ Write-Error -Message "No module specified."
+ exit 1
+}
+$module_path = $manifest.module_path
+
+# Check if the path is relative and get the full path to the module
+if (-not ([System.IO.Path]::IsPathRooted($module_path))) {
+ $module_path = $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($module_path)
+}
+
+if (-not (Test-Path -LiteralPath $module_path -PathType Leaf)) {
+ Write-Error -Message "The module at '$module_path' does not exist."
+ exit 1
+}
+
+$module_code = Get-Content -LiteralPath $module_path -Raw
+
+$powershell = [PowerShell]::Create()
+$powershell.Runspace.SessionStateProxy.SetVariable("ErrorActionPreference", "Stop")
+
+# Load the PowerShell module utils as the module may be using them to refer to shared module options. Currently we
+# can only load the PowerShell utils due to cross platform compatibility issues.
+if ($manifest.Contains('ps_utils')) {
+ foreach ($util_info in $manifest.ps_utils.GetEnumerator()) {
+ $util_name = $util_info.Key
+ $util_path = $util_info.Value
+
+ if (-not (Test-Path -LiteralPath $util_path -PathType Leaf)) {
+ # Failed to find the util path, just silently ignore for now and hope for the best.
+ continue
+ }
+
+ $util_sb = [ScriptBlock]::Create((Get-Content -LiteralPath $util_path -Raw))
+ $powershell.AddCommand('New-Module').AddParameters(@{
+ Name = $util_name
+ ScriptBlock = $util_sb
+ }) > $null
+ $powershell.AddCommand('Import-Module').AddParameter('WarningAction', 'SilentlyContinue') > $null
+ $powershell.AddCommand('Out-Null').AddStatement() > $null
+
+ # Also import it into the current runspace in case ps_argspec.ps1 needs to use it.
+ $null = New-Module -Name $util_name -ScriptBlock $util_sb | Import-Module -WarningAction SilentlyContinue
+ }
+}
+
+Add-CSharpType -References @(Get-Content -LiteralPath $manifest.ansible_basic -Raw)
+[Ansible.Basic.AnsibleModule]::_DebugArgSpec = $true
+
+$powershell.AddScript($module_code) > $null
+$powershell.Invoke() > $null
+$arg_spec = $powershell.Runspace.SessionStateProxy.GetVariable('ansibleTestArgSpec')
+
+if (-not $arg_spec) {
+ $err = $powershell.Streams.Error
+ if ($err) {
+ $err
+ }
+ else {
+ "Unknown error trying to get PowerShell arg spec"
+ }
+
+ exit 1
+}
+
+
+Resolve-CircularReference -Hash $arg_spec
+
+ConvertTo-Json -InputObject $arg_spec -Compress -Depth 99
diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/schema.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/schema.py
new file mode 100644
index 0000000..b2623ff
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/schema.py
@@ -0,0 +1,899 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Matt Martz <matt@sivel.net>
+# Copyright: (c) 2015, Rackspace US, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import annotations
+
+import re
+
+from ansible.module_utils.compat.version import StrictVersion
+from functools import partial
+from urllib.parse import urlparse
+
+from voluptuous import ALLOW_EXTRA, PREVENT_EXTRA, All, Any, Invalid, Length, Required, Schema, Self, ValueInvalid, Exclusive
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common.collections import is_iterable
+from ansible.module_utils.parsing.convert_bool import boolean
+from ansible.parsing.quoting import unquote
+from ansible.utils.version import SemanticVersion
+from ansible.release import __version__
+
+from .utils import parse_isodate
+
+list_string_types = list(string_types)
+tuple_string_types = tuple(string_types)
+any_string_types = Any(*string_types)
+
+# Valid DOCUMENTATION.author lines
+# Based on Ansibulbot's extract_github_id()
+# author: First Last (@name) [optional anything]
+# "Ansible Core Team" - Used by the Bot
+# "Michael DeHaan" - nop
+# "OpenStack Ansible SIG" - OpenStack does not use GitHub
+# "Name (!UNKNOWN)" - For the few untraceable authors
+author_line = re.compile(r'^\w.*(\(@([\w-]+)\)|!UNKNOWN)(?![\w.])|^Ansible Core Team$|^Michael DeHaan$|^OpenStack Ansible SIG$')
+
+
+def _add_ansible_error_code(exception, error_code):
+ setattr(exception, 'ansible_error_code', error_code)
+ return exception
+
+
+def isodate(v, error_code=None):
+ try:
+ parse_isodate(v, allow_date=True)
+ except ValueError as e:
+ raise _add_ansible_error_code(Invalid(str(e)), error_code or 'ansible-invalid-date')
+ return v
+
+
+COLLECTION_NAME_RE = re.compile(r'^\w+(?:\.\w+)+$')
+FULLY_QUALIFIED_COLLECTION_RESOURCE_RE = re.compile(r'^\w+(?:\.\w+){2,}$')
+
+
+def collection_name(v, error_code=None):
+ if not isinstance(v, string_types):
+ raise _add_ansible_error_code(
+ Invalid('Collection name must be a string'), error_code or 'collection-invalid-name')
+ m = COLLECTION_NAME_RE.match(v)
+ if not m:
+ raise _add_ansible_error_code(
+ Invalid('Collection name must be of format `<namespace>.<name>`'), error_code or 'collection-invalid-name')
+ return v
+
+
+def deprecation_versions():
+ """Create a list of valid version for deprecation entries, current+4"""
+ major, minor = [int(version) for version in __version__.split('.')[0:2]]
+ return Any(*['{0}.{1}'.format(major, minor + increment) for increment in range(0, 5)])
+
+
+def version(for_collection=False):
+ if for_collection:
+ # We do not accept floats for versions in collections
+ return Any(*string_types)
+ return Any(float, *string_types)
+
+
+def date(error_code=None):
+ return Any(isodate, error_code=error_code)
+
+
+_MODULE = re.compile(r"\bM\(([^)]+)\)")
+_LINK = re.compile(r"\bL\(([^)]+)\)")
+_URL = re.compile(r"\bU\(([^)]+)\)")
+_REF = re.compile(r"\bR\(([^)]+)\)")
+
+
+def _check_module_link(directive, content):
+ if not FULLY_QUALIFIED_COLLECTION_RESOURCE_RE.match(content):
+ raise _add_ansible_error_code(
+ Invalid('Directive "%s" must contain a FQCN' % directive), 'invalid-documentation-markup')
+
+
+def _check_link(directive, content):
+ if ',' not in content:
+ raise _add_ansible_error_code(
+ Invalid('Directive "%s" must contain a comma' % directive), 'invalid-documentation-markup')
+ idx = content.rindex(',')
+ title = content[:idx]
+ url = content[idx + 1:].lstrip(' ')
+ _check_url(directive, url)
+
+
+def _check_url(directive, content):
+ try:
+ parsed_url = urlparse(content)
+ if parsed_url.scheme not in ('', 'http', 'https'):
+            raise ValueError('Scheme must be HTTP, HTTPS, or not specified')
+ except ValueError as exc:
+ raise _add_ansible_error_code(
+            Invalid('Directive "%s" must contain a URL' % directive), 'invalid-documentation-markup')
+
+
+def _check_ref(directive, content):
+ if ',' not in content:
+ raise _add_ansible_error_code(
+ Invalid('Directive "%s" must contain a comma' % directive), 'invalid-documentation-markup')
+
+
+def doc_string(v):
+ """Match a documentation string."""
+ if not isinstance(v, string_types):
+ raise _add_ansible_error_code(
+ Invalid('Must be a string'), 'invalid-documentation')
+ for m in _MODULE.finditer(v):
+ _check_module_link(m.group(0), m.group(1))
+ for m in _LINK.finditer(v):
+ _check_link(m.group(0), m.group(1))
+ for m in _URL.finditer(v):
+ _check_url(m.group(0), m.group(1))
+ for m in _REF.finditer(v):
+ _check_ref(m.group(0), m.group(1))
+ return v
+
+
+def doc_string_or_strings(v):
+ """Match a documentation string, or list of strings."""
+ if isinstance(v, string_types):
+ return doc_string(v)
+ if isinstance(v, (list, tuple)):
+ return [doc_string(vv) for vv in v]
+ raise _add_ansible_error_code(
+ Invalid('Must be a string or list of strings'), 'invalid-documentation')
+
+
+def is_callable(v):
+ if not callable(v):
+ raise ValueInvalid('not a valid value')
+ return v
+
+
+def sequence_of_sequences(min=None, max=None):
+ return All(
+ Any(
+ None,
+ [Any(list, tuple)],
+ tuple([Any(list, tuple)]),
+ ),
+ Any(
+ None,
+ [Length(min=min, max=max)],
+ tuple([Length(min=min, max=max)]),
+ ),
+ )
+
+
+seealso_schema = Schema(
+ [
+ Any(
+ {
+ Required('module'): Any(*string_types),
+ 'description': doc_string,
+ },
+ {
+ Required('ref'): Any(*string_types),
+ Required('description'): doc_string,
+ },
+ {
+ Required('name'): Any(*string_types),
+ Required('link'): Any(*string_types),
+ Required('description'): doc_string,
+ },
+ ),
+ ]
+)
+
+
+argument_spec_types = ['bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw',
+ 'sid', 'str']
+
+
+argument_spec_modifiers = {
+ 'mutually_exclusive': sequence_of_sequences(min=2),
+ 'required_together': sequence_of_sequences(min=2),
+ 'required_one_of': sequence_of_sequences(min=2),
+ 'required_if': sequence_of_sequences(min=3, max=4),
+ 'required_by': Schema({str: Any(list_string_types, tuple_string_types, *string_types)}),
+}
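+
+# A hypothetical argument spec using these modifiers might look like this
+# (illustrative, not from the original source):
+#   mutually_exclusive=[['path', 'content']],
+#   required_if=[['state', 'present', ['path', 'content'], True]],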
+
+
+def no_required_with_default(v):
+ if v.get('default') and v.get('required'):
+ raise Invalid('required=True cannot be supplied with a default')
+ return v
+
+
+def elements_with_list(v):
+ if v.get('elements') and v.get('type') != 'list':
+ raise Invalid('type must be list to use elements')
+ return v
+
+
+def options_with_apply_defaults(v):
+ if v.get('apply_defaults') and not v.get('options'):
+ raise Invalid('apply_defaults=True requires options to be set')
+ return v
+
+
+def check_removal_version(v, version_field, collection_name_field, error_code='invalid-removal-version'):
+ version = v.get(version_field)
+ collection_name = v.get(collection_name_field)
+ if not isinstance(version, string_types) or not isinstance(collection_name, string_types):
+ # If they are not strings, schema validation will have already complained.
+ return v
+ if collection_name == 'ansible.builtin':
+ try:
+ parsed_version = StrictVersion()
+ parsed_version.parse(version)
+ except ValueError as exc:
+ raise _add_ansible_error_code(
+ Invalid('%s (%r) is not a valid ansible-core version: %s' % (version_field, version, exc)),
+ error_code=error_code)
+ return v
+ try:
+ parsed_version = SemanticVersion()
+ parsed_version.parse(version)
+ if parsed_version.major != 0 and (parsed_version.minor != 0 or parsed_version.patch != 0):
+ raise _add_ansible_error_code(
+ Invalid('%s (%r) must be a major release, not a minor or patch release (see specification at '
+ 'https://semver.org/)' % (version_field, version)),
+ error_code='removal-version-must-be-major')
+ except ValueError as exc:
+ raise _add_ansible_error_code(
+ Invalid('%s (%r) is not a valid collection version (see specification at https://semver.org/): '
+ '%s' % (version_field, version, exc)),
+ error_code=error_code)
+ return v
+
+
+def option_deprecation(v):
+ if v.get('removed_in_version') or v.get('removed_at_date'):
+ if v.get('removed_in_version') and v.get('removed_at_date'):
+ raise _add_ansible_error_code(
+                Invalid('Only one of removed_in_version and removed_at_date may be specified'),
+ error_code='deprecation-either-date-or-version')
+ if not v.get('removed_from_collection'):
+ raise _add_ansible_error_code(
+ Invalid('If removed_in_version or removed_at_date is specified, '
+ 'removed_from_collection must be specified as well'),
+ error_code='deprecation-collection-missing')
+ check_removal_version(v,
+ version_field='removed_in_version',
+ collection_name_field='removed_from_collection',
+ error_code='invalid-removal-version')
+ return
+ if v.get('removed_from_collection'):
+ raise Invalid('removed_from_collection cannot be specified without either '
+ 'removed_in_version or removed_at_date')
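+
+# A deprecation entry that passes these checks might look like this
+# (hypothetical collection and version):
+#   {'removed_in_version': '3.0.0', 'removed_from_collection': 'community.general'}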
+
+
+def argument_spec_schema(for_collection):
+ any_string_types = Any(*string_types)
+ schema = {
+ any_string_types: {
+ 'type': Any(is_callable, *argument_spec_types),
+ 'elements': Any(*argument_spec_types),
+ 'default': object,
+ 'fallback': Any(
+ (is_callable, list_string_types),
+ [is_callable, list_string_types],
+ ),
+ 'choices': Any([object], (object,)),
+ 'required': bool,
+ 'no_log': bool,
+ 'aliases': Any(list_string_types, tuple(list_string_types)),
+ 'apply_defaults': bool,
+ 'removed_in_version': version(for_collection),
+ 'removed_at_date': date(),
+ 'removed_from_collection': collection_name,
+ 'options': Self,
+ 'deprecated_aliases': Any([All(
+ Any(
+ {
+ Required('name'): Any(*string_types),
+ Required('date'): date(),
+ Required('collection_name'): collection_name,
+ },
+ {
+ Required('name'): Any(*string_types),
+ Required('version'): version(for_collection),
+ Required('collection_name'): collection_name,
+ },
+ ),
+ partial(check_removal_version,
+ version_field='version',
+ collection_name_field='collection_name',
+ error_code='invalid-removal-version')
+ )]),
+ }
+ }
+ schema[any_string_types].update(argument_spec_modifiers)
+ schemas = All(
+ schema,
+ Schema({any_string_types: no_required_with_default}),
+ Schema({any_string_types: elements_with_list}),
+ Schema({any_string_types: options_with_apply_defaults}),
+ Schema({any_string_types: option_deprecation}),
+ )
+ return Schema(schemas)
+
+
+def ansible_module_kwargs_schema(module_name, for_collection):
+ schema = {
+ 'argument_spec': argument_spec_schema(for_collection),
+ 'bypass_checks': bool,
+ 'no_log': bool,
+ 'check_invalid_arguments': Any(None, bool),
+ 'add_file_common_args': bool,
+ 'supports_check_mode': bool,
+ }
+ if module_name.endswith(('_info', '_facts')):
+ del schema['supports_check_mode']
+ schema[Required('supports_check_mode')] = True
+ schema.update(argument_spec_modifiers)
+ return Schema(schema)
+
+
+json_value = Schema(Any(
+ None,
+ int,
+ float,
+ [Self],
+ *(list({str_type: Self} for str_type in string_types) + list(string_types))
+))
+
+
+def version_added(v, error_code='version-added-invalid', accept_historical=False):
+ if 'version_added' in v:
+ version_added = v.get('version_added')
+ if isinstance(version_added, string_types):
+ # If it is not a string, schema validation will have already complained
+ # - or we have a float and we are in ansible/ansible, in which case we're
+ # also happy.
+ if v.get('version_added_collection') == 'ansible.builtin':
+ if version_added == 'historical' and accept_historical:
+ return v
+ try:
+ version = StrictVersion()
+ version.parse(version_added)
+ except ValueError as exc:
+ raise _add_ansible_error_code(
+ Invalid('version_added (%r) is not a valid ansible-core version: '
+ '%s' % (version_added, exc)),
+ error_code=error_code)
+ else:
+ try:
+ version = SemanticVersion()
+ version.parse(version_added)
+ if version.major != 0 and version.patch != 0:
+ raise _add_ansible_error_code(
+ Invalid('version_added (%r) must be a major or minor release, '
+ 'not a patch release (see specification at '
+ 'https://semver.org/)' % (version_added, )),
+ error_code='version-added-must-be-major-or-minor')
+ except ValueError as exc:
+ raise _add_ansible_error_code(
+ Invalid('version_added (%r) is not a valid collection version '
+ '(see specification at https://semver.org/): '
+ '%s' % (version_added, exc)),
+ error_code=error_code)
+ elif 'version_added_collection' in v:
+ # Must have been manual intervention, since version_added_collection is only
+ # added automatically when version_added is present
+ raise Invalid('version_added_collection cannot be specified without version_added')
+ return v
+
+
+def check_option_elements(v):
+ # Check whether elements is there iff type == 'list'
+ v_type = v.get('type')
+ v_elements = v.get('elements')
+ if v_type == 'list' and v_elements is None:
+ raise _add_ansible_error_code(
+ Invalid('Argument defines type as list but elements is not defined'),
+ error_code='parameter-list-no-elements') # FIXME: adjust error code?
+ if v_type != 'list' and v_elements is not None:
+ raise _add_ansible_error_code(
+ Invalid('Argument defines parameter elements as %s but it is valid only when value of parameter type is list' % (v_elements, )),
+ error_code='doc-elements-invalid')
+ return v
+
+
+def get_type_checker(v):
+ v_type = v.get('type')
+ if v_type == 'list':
+ elt_checker, elt_name = get_type_checker({'type': v.get('elements')})
+
+ def list_checker(value):
+ if isinstance(value, string_types):
+ value = [unquote(x.strip()) for x in value.split(',')]
+ if not isinstance(value, list):
+ raise ValueError('Value must be a list')
+ if elt_checker:
+ for elt in value:
+ try:
+ elt_checker(elt)
+ except Exception as exc:
+ raise ValueError('Entry %r is not of type %s: %s' % (elt, elt_name, exc))
+
+ return list_checker, ('list of %s' % elt_name) if elt_checker else 'list'
+
+ if v_type in ('boolean', 'bool'):
+ return partial(boolean, strict=False), v_type
+
+ if v_type in ('integer', 'int'):
+ return int, v_type
+
+ if v_type == 'float':
+ return float, v_type
+
+ if v_type == 'none':
+ def none_checker(value):
+ if value not in ('None', None):
+ raise ValueError('Value must be "None" or none')
+
+ return none_checker, v_type
+
+ if v_type in ('str', 'string', 'path', 'tmp', 'temppath', 'tmppath'):
+ def str_checker(value):
+ if not isinstance(value, string_types):
+ raise ValueError('Value must be string')
+
+ return str_checker, v_type
+
+ if v_type in ('pathspec', 'pathlist'):
+ def path_list_checker(value):
+ if not isinstance(value, string_types) and not is_iterable(value):
+ raise ValueError('Value must be string or list of strings')
+
+ return path_list_checker, v_type
+
+ if v_type in ('dict', 'dictionary'):
+ def dict_checker(value):
+ if not isinstance(value, dict):
+ raise ValueError('Value must be dictionary')
+
+ return dict_checker, v_type
+
+ return None, 'unknown'
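+
+# Example usage (illustrative): get_type_checker({'type': 'list', 'elements': 'int'})
+# returns a checker named 'list of int' that accepts [1, 2] as well as the
+# comma-separated string '1,2'.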
+
+
+def check_option_choices(v):
+ # Check whether choices have the correct type
+ v_choices = v.get('choices')
+ if not is_iterable(v_choices):
+ return v
+
+ if v.get('type') == 'list':
+ # choices for a list type means that every list element must be one of these choices
+ type_checker, type_name = get_type_checker({'type': v.get('elements')})
+ else:
+ type_checker, type_name = get_type_checker(v)
+ if type_checker is None:
+ return v
+
+ for value in v_choices:
+ try:
+ type_checker(value)
+ except Exception as exc:
+ raise _add_ansible_error_code(
+ Invalid(
+ 'Argument defines choices as (%r) but this is incompatible with argument type %s: %s' % (value, type_name, exc)),
+ error_code='doc-choices-incompatible-type')
+
+ return v
+
+
+def check_option_default(v):
+ # Check whether default is only present if required=False, and whether default has correct type
+ v_default = v.get('default')
+ if v.get('required') and v_default is not None:
+ raise _add_ansible_error_code(
+ Invalid(
+ 'Argument is marked as required but specifies a default.'
+ ' Arguments with a default should not be marked as required'),
+ error_code='no-default-for-required-parameter') # FIXME: adjust error code?
+
+ if v_default is None:
+ return v
+
+ type_checker, type_name = get_type_checker(v)
+ if type_checker is None:
+ return v
+
+ try:
+ type_checker(v_default)
+ except Exception as exc:
+ raise _add_ansible_error_code(
+ Invalid(
+ 'Argument defines default as (%r) but this is incompatible with parameter type %s: %s' % (v_default, type_name, exc)),
+ error_code='incompatible-default-type')
+
+ return v
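+
+# For instance (illustrative), an option declaring {'type': 'int', 'default': 'oops'}
+# fails here with error code 'incompatible-default-type', since int('oops')
+# raises ValueError.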
+
+
+def list_dict_option_schema(for_collection, plugin_type):
+ if plugin_type == 'module':
+ option_types = Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str')
+ element_types = option_types
+ else:
+ option_types = Any(None, 'boolean', 'bool', 'integer', 'int', 'float', 'list', 'dict', 'dictionary', 'none',
+ 'path', 'tmp', 'temppath', 'tmppath', 'pathspec', 'pathlist', 'str', 'string', 'raw')
+ element_types = Any(None, 'boolean', 'bool', 'integer', 'int', 'float', 'list', 'dict', 'dictionary', 'path', 'str', 'string', 'raw')
+
+ basic_option_schema = {
+ Required('description'): doc_string_or_strings,
+ 'required': bool,
+ 'choices': list,
+ 'aliases': Any(list_string_types),
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ 'default': json_value,
+        # Note: type values are strings (such as 'bool'), not literal booleans like True or False
+ 'type': option_types,
+ # in case of type='list' elements define type of individual item in list
+ 'elements': element_types,
+ }
+ if plugin_type != 'module':
+ basic_option_schema['name'] = Any(*string_types)
+ deprecated_schema = All(
+ Schema(
+ All(
+ {
+ # This definition makes sure everything has the correct types/values
+ 'why': doc_string,
+ 'alternatives': doc_string,
+ # vod stands for 'version or date'; this is the name of the exclusive group
+ Exclusive('removed_at_date', 'vod'): date(),
+ Exclusive('version', 'vod'): version(for_collection),
+ 'collection_name': collection_name,
+ },
+ {
+ # This definition makes sure that everything we require is there
+ Required('why'): Any(*string_types),
+ 'alternatives': Any(*string_types),
+ Required(Any('removed_at_date', 'version')): Any(*string_types),
+ Required('collection_name'): Any(*string_types),
+ },
+ ),
+ extra=PREVENT_EXTRA
+ ),
+ partial(check_removal_version,
+ version_field='version',
+ collection_name_field='collection_name',
+ error_code='invalid-removal-version'),
+ )
+ env_schema = All(
+ Schema({
+ Required('name'): Any(*string_types),
+ 'deprecated': deprecated_schema,
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ }, extra=PREVENT_EXTRA),
+ partial(version_added, error_code='option-invalid-version-added')
+ )
+ ini_schema = All(
+ Schema({
+ Required('key'): Any(*string_types),
+ Required('section'): Any(*string_types),
+ 'deprecated': deprecated_schema,
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ }, extra=PREVENT_EXTRA),
+ partial(version_added, error_code='option-invalid-version-added')
+ )
+ vars_schema = All(
+ Schema({
+ Required('name'): Any(*string_types),
+ 'deprecated': deprecated_schema,
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ }, extra=PREVENT_EXTRA),
+ partial(version_added, error_code='option-invalid-version-added')
+ )
+ cli_schema = All(
+ Schema({
+ Required('name'): Any(*string_types),
+ 'option': Any(*string_types),
+ 'deprecated': deprecated_schema,
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ }, extra=PREVENT_EXTRA),
+ partial(version_added, error_code='option-invalid-version-added')
+ )
+ keyword_schema = All(
+ Schema({
+ Required('name'): Any(*string_types),
+ 'deprecated': deprecated_schema,
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ }, extra=PREVENT_EXTRA),
+ partial(version_added, error_code='option-invalid-version-added')
+ )
+ basic_option_schema.update({
+ 'env': [env_schema],
+ 'ini': [ini_schema],
+ 'vars': [vars_schema],
+ 'cli': [cli_schema],
+ 'keyword': [keyword_schema],
+ 'deprecated': deprecated_schema,
+ })
+
+ suboption_schema = dict(basic_option_schema)
+ suboption_schema.update({
+ # Recursive suboptions
+ 'suboptions': Any(None, *list({str_type: Self} for str_type in string_types)),
+ })
+ suboption_schema = Schema(All(
+ suboption_schema,
+ check_option_elements,
+ check_option_choices,
+ check_option_default,
+ ), extra=PREVENT_EXTRA)
+
+    # This generates a list of dicts with keys from string_types and suboption_schema as the value
+ # for example in Python 3: {str: suboption_schema}
+ list_dict_suboption_schema = [{str_type: suboption_schema} for str_type in string_types]
+
+ option_schema = dict(basic_option_schema)
+ option_schema.update({
+ 'suboptions': Any(None, *list_dict_suboption_schema),
+ })
+ option_schema = Schema(All(
+ option_schema,
+ check_option_elements,
+ check_option_choices,
+ check_option_default,
+ ), extra=PREVENT_EXTRA)
+
+ option_version_added = Schema(
+ All({
+ 'suboptions': Any(None, *[{str_type: Self} for str_type in string_types]),
+ }, partial(version_added, error_code='option-invalid-version-added')),
+ extra=ALLOW_EXTRA
+ )
+
+    # This generates a list of dicts with keys from string_types and option_schema as the value
+ # for example in Python 3: {str: option_schema}
+ return [{str_type: All(option_schema, option_version_added)} for str_type in string_types]
+
+
+def return_contains(v):
+ schema = Schema(
+ {
+ Required('contains'): Any(dict, list, *string_types)
+ },
+ extra=ALLOW_EXTRA
+ )
+ if v.get('type') == 'complex':
+ return schema(v)
+ return v
+
+
+def return_schema(for_collection, plugin_type='module'):
+ if plugin_type == 'module':
+ return_types = Any('bool', 'complex', 'dict', 'float', 'int', 'list', 'raw', 'str')
+ element_types = Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str')
+ else:
+ return_types = Any(None, 'boolean', 'bool', 'integer', 'int', 'float', 'list', 'dict', 'dictionary', 'path', 'str', 'string', 'raw')
+ element_types = return_types
+
+ basic_return_option_schema = {
+ Required('description'): doc_string_or_strings,
+ 'returned': doc_string,
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ 'sample': json_value,
+ 'example': json_value,
+ # in case of type='list' elements define type of individual item in list
+ 'elements': element_types,
+ 'choices': Any([object], (object,)),
+ }
+ if plugin_type == 'module':
+ # type is only required for modules right now
+ basic_return_option_schema[Required('type')] = return_types
+ else:
+ basic_return_option_schema['type'] = return_types
+
+ inner_return_option_schema = dict(basic_return_option_schema)
+ inner_return_option_schema.update({
+ 'contains': Any(None, *list({str_type: Self} for str_type in string_types)),
+ })
+ return_contains_schema = Any(
+ All(
+ Schema(inner_return_option_schema),
+ Schema(return_contains),
+ Schema(partial(version_added, error_code='option-invalid-version-added')),
+ ),
+ Schema(type(None)),
+ )
+
+    # This generates a list of dicts with keys from string_types and return_contains_schema as the value
+ # for example in Python 3: {str: return_contains_schema}
+ list_dict_return_contains_schema = [{str_type: return_contains_schema} for str_type in string_types]
+
+ return_option_schema = dict(basic_return_option_schema)
+ return_option_schema.update({
+ 'contains': Any(None, *list_dict_return_contains_schema),
+ })
+ if plugin_type == 'module':
+ # 'returned' is required on top-level
+ del return_option_schema['returned']
+ return_option_schema[Required('returned')] = Any(*string_types)
+ return Any(
+ All(
+ Schema(
+ {
+ any_string_types: return_option_schema
+ }
+ ),
+ Schema({any_string_types: return_contains}),
+ Schema({any_string_types: partial(version_added, error_code='option-invalid-version-added')}),
+ ),
+ Schema(type(None)),
+ )
+
+
+def deprecation_schema(for_collection):
+ main_fields = {
+ Required('why'): doc_string,
+ Required('alternative'): doc_string,
+ Required('removed_from_collection'): collection_name,
+ 'removed': Any(True),
+ }
+
+ date_schema = {
+ Required('removed_at_date'): date(),
+ }
+ date_schema.update(main_fields)
+
+ if for_collection:
+ version_schema = {
+ Required('removed_in'): version(for_collection),
+ }
+ else:
+ version_schema = {
+ Required('removed_in'): deprecation_versions(),
+ }
+ version_schema.update(main_fields)
+
+ result = Any(
+ Schema(version_schema, extra=PREVENT_EXTRA),
+ Schema(date_schema, extra=PREVENT_EXTRA),
+ )
+
+ if for_collection:
+ result = All(
+ result,
+ partial(check_removal_version,
+ version_field='removed_in',
+ collection_name_field='removed_from_collection',
+ error_code='invalid-removal-version'))
+
+ return result
+
+
+def author(value):
+ if value is None:
+ return value # let schema checks handle
+
+ if not is_iterable(value):
+ value = [value]
+
+ for line in value:
+ if not isinstance(line, string_types):
+ continue # let schema checks handle
+ m = author_line.search(line)
+ if not m:
+ raise Invalid("Invalid author")
+
+ return value
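+
+# Illustrative behaviour: author('John Doe (@johndoe)') passes, while
+# author('John Doe') raises Invalid because neither a GitHub id nor the
+# !UNKNOWN marker is present.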
+
+
+def doc_schema(module_name, for_collection=False, deprecated_module=False, plugin_type='module'):
+
+ if module_name.startswith('_'):
+ module_name = module_name[1:]
+ deprecated_module = True
+ if for_collection is False and plugin_type == 'connection' and module_name == 'paramiko_ssh':
+ # The plugin loader has a hard-coded exception: when the builtin connection 'paramiko' is
+ # referenced, it loads 'paramiko_ssh' instead. That's why in this plugin, the name must be
+ # 'paramiko' and not 'paramiko_ssh'.
+ module_name = 'paramiko'
+ doc_schema_dict = {
+ Required('module' if plugin_type == 'module' else 'name'): module_name,
+ Required('short_description'): doc_string,
+ Required('description'): doc_string_or_strings,
+ 'notes': Any(None, [doc_string]),
+ 'seealso': Any(None, seealso_schema),
+ 'requirements': [doc_string],
+ 'todo': Any(None, doc_string_or_strings),
+ 'options': Any(None, *list_dict_option_schema(for_collection, plugin_type)),
+ 'extends_documentation_fragment': Any(list_string_types, *string_types),
+ 'version_added_collection': collection_name,
+ }
+ if plugin_type == 'module':
+ doc_schema_dict[Required('author')] = All(Any(None, list_string_types, *string_types), author)
+ else:
+ # author is optional for plugins (for now)
+ doc_schema_dict['author'] = All(Any(None, list_string_types, *string_types), author)
+ if plugin_type == 'callback':
+ doc_schema_dict[Required('type')] = Any('aggregate', 'notification', 'stdout')
+
+ if for_collection:
+ # Optional
+ doc_schema_dict['version_added'] = version(for_collection=True)
+ else:
+ doc_schema_dict[Required('version_added')] = version(for_collection=False)
+
+ if deprecated_module:
+ deprecation_required_scheme = {
+ Required('deprecated'): Any(deprecation_schema(for_collection=for_collection)),
+ }
+
+ doc_schema_dict.update(deprecation_required_scheme)
+
+ def add_default_attributes(more=None):
+ schema = {
+ 'description': doc_string_or_strings,
+ 'details': doc_string_or_strings,
+ 'support': any_string_types,
+ 'version_added_collection': any_string_types,
+ 'version_added': any_string_types,
+ }
+ if more:
+ schema.update(more)
+ return schema
+
+ doc_schema_dict['attributes'] = Schema(
+ All(
+ Schema({
+ any_string_types: {
+ Required('description'): doc_string_or_strings,
+ Required('support'): Any('full', 'partial', 'none', 'N/A'),
+ 'details': doc_string_or_strings,
+ 'version_added_collection': collection_name,
+ 'version_added': version(for_collection=for_collection),
+ },
+ }, extra=ALLOW_EXTRA),
+ partial(version_added, error_code='attribute-invalid-version-added', accept_historical=False),
+ Schema({
+ any_string_types: add_default_attributes(),
+ 'action_group': add_default_attributes({
+ Required('membership'): list_string_types,
+ }),
+ 'forced_action_plugin': add_default_attributes({
+ Required('action_plugin'): any_string_types,
+ }),
+ 'platform': add_default_attributes({
+ Required('platforms'): Any(list_string_types, *string_types)
+ }),
+ }, extra=PREVENT_EXTRA),
+ )
+ )
+ return Schema(
+ All(
+ Schema(
+ doc_schema_dict,
+ extra=PREVENT_EXTRA
+ ),
+ partial(version_added, error_code='module-invalid-version-added', accept_historical=not for_collection),
+ )
+ )
+
+
+# Things to add soon
+####################
+# 1) Recursively validate `type: complex` fields
+# This will improve documentation, though it will require a fair amount of module tidy-up
+
+# Possible Future Enhancements
+##############################
+
+# 1) Don't allow empty options for choices, aliases, etc
+# 2) If type: bool ensure choices isn't set - perhaps use Exclusive
+# 3) Both version_added values should be quoted floats
+
+# Tool that takes JSON and generates RETURN skeleton (needs to support complex structures)
diff --git a/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py
new file mode 100644
index 0000000..88d5b01
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py
@@ -0,0 +1,222 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz <matt@sivel.net>
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import annotations
+
+import ast
+import datetime
+import os
+import re
+import sys
+
+from io import BytesIO, TextIOWrapper
+
+import yaml
+import yaml.reader
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.common.yaml import SafeLoader
+from ansible.module_utils.six import string_types
+from ansible.parsing.yaml.loader import AnsibleLoader
+
+
+class AnsibleTextIOWrapper(TextIOWrapper):
+ def write(self, s):
+ super(AnsibleTextIOWrapper, self).write(to_text(s, self.encoding, errors='replace'))
+
+
+def find_executable(executable, cwd=None, path=None):
+ """Finds the full path to the executable specified"""
+ match = None
+ real_cwd = os.getcwd()
+
+ if not cwd:
+ cwd = real_cwd
+
+ if os.path.dirname(executable):
+ target = os.path.join(cwd, executable)
+ if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
+ match = executable
+ else:
+ path = os.environ.get('PATH', os.path.defpath)
+
+ path_dirs = path.split(os.path.pathsep)
+ seen_dirs = set()
+
+ for path_dir in path_dirs:
+ if path_dir in seen_dirs:
+ continue
+
+ seen_dirs.add(path_dir)
+
+ if os.path.abspath(path_dir) == real_cwd:
+ path_dir = cwd
+
+ candidate = os.path.join(path_dir, executable)
+
+ if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
+ match = candidate
+ break
+
+ return match
+
+
+def find_globals(g, tree):
+ """Uses AST to find globals in an ast tree"""
+ for child in tree:
+ if hasattr(child, 'body') and isinstance(child.body, list):
+ find_globals(g, child.body)
+ elif isinstance(child, (ast.FunctionDef, ast.ClassDef)):
+ g.add(child.name)
+ continue
+ elif isinstance(child, ast.Assign):
+ try:
+ g.add(child.targets[0].id)
+ except (IndexError, AttributeError):
+ pass
+ elif isinstance(child, ast.Import):
+ g.add(child.names[0].name)
+ elif isinstance(child, ast.ImportFrom):
+ for name in child.names:
+ g_name = name.asname or name.name
+ if g_name == '*':
+ continue
+ g.add(g_name)
+
+
+class CaptureStd():
+ """Context manager to handle capturing stderr and stdout"""
+
+ def __enter__(self):
+ self.sys_stdout = sys.stdout
+ self.sys_stderr = sys.stderr
+ sys.stdout = self.stdout = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stdout.encoding)
+ sys.stderr = self.stderr = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stderr.encoding)
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ sys.stdout = self.sys_stdout
+ sys.stderr = self.sys_stderr
+
+ def get(self):
+ """Return ``(stdout, stderr)``"""
+
+ return self.stdout.buffer.getvalue(), self.stderr.buffer.getvalue()
+
+
+def get_module_name_from_filename(filename, collection):
+ # Calculate the module's name so that relative imports work correctly
+ if collection:
+ # collection is a relative path, example: ansible_collections/my_namespace/my_collection
+ # filename is a relative path, example: plugins/modules/my_module.py
+ path = os.path.join(collection, filename)
+ else:
+ # filename is a relative path, example: lib/ansible/modules/system/ping.py
+ path = os.path.relpath(filename, 'lib')
+
+ name = os.path.splitext(path)[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def parse_yaml(value, lineno, module, name, load_all=False, ansible_loader=False):
+ traces = []
+ errors = []
+ data = None
+
+ if load_all:
+ yaml_load = yaml.load_all
+ else:
+ yaml_load = yaml.load
+
+ if ansible_loader:
+ loader = AnsibleLoader
+ else:
+ loader = SafeLoader
+
+ try:
+ data = yaml_load(value, Loader=loader)
+ if load_all:
+ data = list(data)
+ except yaml.MarkedYAMLError as e:
+ errors.append({
+ 'msg': '%s is not valid YAML' % name,
+ 'line': e.problem_mark.line + lineno,
+ 'column': e.problem_mark.column + 1
+ })
+ traces.append(e)
+ except yaml.reader.ReaderError as e:
+ traces.append(e)
+ # TODO: Better line/column detection
+ errors.append({
+ 'msg': ('%s is not valid YAML. Character '
+ '0x%x at position %d.' % (name, e.character, e.position)),
+ 'line': lineno
+ })
+ except yaml.YAMLError as e:
+ traces.append(e)
+ errors.append({
+ 'msg': '%s is not valid YAML: %s: %s' % (name, type(e), e),
+ 'line': lineno
+ })
+
+ return data, errors, traces
+
+
+def is_empty(value):
+ """Evaluate null like values excluding False"""
+ if value is False:
+ return False
+ return not bool(value)
+
+
+def compare_unordered_lists(a, b):
+ """Safe list comparisons
+
+ Supports:
+ - unordered lists
+ - unhashable elements
+ """
+ return len(a) == len(b) and all(x in b for x in a)
+
+
+class NoArgsAnsibleModule(AnsibleModule):
+ """AnsibleModule that does not actually load params. This is used to get access to the
+ methods within AnsibleModule without having to fake a bunch of data
+ """
+ def _load_params(self):
+ self.params = {'_ansible_selinux_special_fs': [], '_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False, '_ansible_check_mode': False}
+
+
+def parse_isodate(v, allow_date):
+ if allow_date:
+ if isinstance(v, datetime.date):
+ return v
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD) or YAML date'
+ else:
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
+ if not isinstance(v, string_types):
+ raise ValueError(msg)
+    # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
+ # we have to do things manually.
+ if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', v):
+ raise ValueError(msg)
+ try:
+ return datetime.datetime.strptime(v, '%Y-%m-%d').date()
+ except ValueError:
+ raise ValueError(msg)
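+
+# Example (illustrative): parse_isodate('2023-05-01', allow_date=False) returns
+# datetime.date(2023, 5, 1), while parse_isodate('01.05.2023', allow_date=False)
+# raises ValueError.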
diff --git a/test/lib/ansible_test/_util/controller/sanity/yamllint/config/default.yml b/test/lib/ansible_test/_util/controller/sanity/yamllint/config/default.yml
new file mode 100644
index 0000000..45d8b7a
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/yamllint/config/default.yml
@@ -0,0 +1,19 @@
+extends: default
+
+rules:
+ braces: {max-spaces-inside: 1, level: error}
+ brackets: {max-spaces-inside: 1, level: error}
+ colons: {max-spaces-after: -1, level: error}
+ commas: {max-spaces-after: -1, level: error}
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ empty-lines: {max: 3, level: error}
+ hyphens: {level: error}
+ indentation: disable
+ key-duplicates: enable
+ line-length: disable
+ new-line-at-end-of-file: disable
+ new-lines: {type: unix}
+ trailing-spaces: disable
+ truthy: disable
diff --git a/test/lib/ansible_test/_util/controller/sanity/yamllint/config/modules.yml b/test/lib/ansible_test/_util/controller/sanity/yamllint/config/modules.yml
new file mode 100644
index 0000000..da7e604
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/yamllint/config/modules.yml
@@ -0,0 +1,19 @@
+extends: default
+
+rules:
+ braces: disable
+ brackets: disable
+ colons: disable
+ commas: disable
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ empty-lines: disable
+ hyphens: disable
+ indentation: disable
+ key-duplicates: enable
+ line-length: disable
+ new-line-at-end-of-file: disable
+ new-lines: {type: unix}
+ trailing-spaces: disable
+ truthy: disable
diff --git a/test/lib/ansible_test/_util/controller/sanity/yamllint/config/plugins.yml b/test/lib/ansible_test/_util/controller/sanity/yamllint/config/plugins.yml
new file mode 100644
index 0000000..6d41813
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/yamllint/config/plugins.yml
@@ -0,0 +1,19 @@
+extends: default
+
+rules:
+ braces: disable
+ brackets: disable
+ colons: disable
+ commas: disable
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ empty-lines: disable
+ hyphens: disable
+ indentation: disable
+ key-duplicates: disable
+ line-length: disable
+ new-line-at-end-of-file: disable
+ new-lines: {type: unix}
+ trailing-spaces: disable
+ truthy: disable
diff --git a/test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py b/test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py
new file mode 100644
index 0000000..d6de611
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/sanity/yamllint/yamllinter.py
@@ -0,0 +1,246 @@
+"""Wrapper around yamllint that supports YAML embedded in Ansible modules."""
+from __future__ import annotations
+
+import ast
+import json
+import os
+import re
+import sys
+import typing as t
+
+import yaml
+from yaml.resolver import Resolver
+from yaml.constructor import SafeConstructor
+from yaml.error import MarkedYAMLError
+from yaml.cyaml import CParser
+
+from yamllint import linter
+from yamllint.config import YamlLintConfig
+
+
+def main():
+ """Main program body."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ checker = YamlChecker()
+ checker.check(paths)
+ checker.report()
+
+
+class TestConstructor(SafeConstructor):
+ """Yaml Safe Constructor that knows about Ansible tags."""
+ def construct_yaml_unsafe(self, node):
+ """Construct an unsafe tag."""
+ try:
+ constructor = getattr(node, 'id', 'object')
+ if constructor is not None:
+ constructor = getattr(self, 'construct_%s' % constructor)
+ except AttributeError:
+ constructor = self.construct_object
+
+ value = constructor(node)
+
+ return value
+
+
+TestConstructor.add_constructor(
+ '!unsafe',
+ TestConstructor.construct_yaml_unsafe)
+
+
+TestConstructor.add_constructor(
+ '!vault',
+ TestConstructor.construct_yaml_str)
+
+
+TestConstructor.add_constructor(
+ '!vault-encrypted',
+ TestConstructor.construct_yaml_str)
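+
+# With these constructors registered, tagged YAML such as the following
+# (illustrative snippet, not from the original source) loads without error,
+# with !vault payloads constructed as plain strings:
+#   password: !vault ciphertext-goes-here
+#   command: !unsafe '{{ not_a_template }}'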
+
+
+class TestLoader(CParser, TestConstructor, Resolver):
+ """Custom YAML loader that recognizes custom Ansible tags."""
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ TestConstructor.__init__(self)
+ Resolver.__init__(self)
+
+
+class YamlChecker:
+ """Wrapper around yamllint that supports YAML embedded in Ansible modules."""
+ def __init__(self):
+ self.messages = []
+
+ def report(self):
+ """Print yamllint report to stdout."""
+ report = dict(
+ messages=self.messages,
+ )
+
+ print(json.dumps(report, indent=4, sort_keys=True))
+
+ def check(self, paths): # type: (t.List[str]) -> None
+ """Check the specified paths."""
+ config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config')
+
+ yaml_conf = YamlLintConfig(file=os.path.join(config_path, 'default.yml'))
+ module_conf = YamlLintConfig(file=os.path.join(config_path, 'modules.yml'))
+ plugin_conf = YamlLintConfig(file=os.path.join(config_path, 'plugins.yml'))
+
+ for path in paths:
+ extension = os.path.splitext(path)[1]
+
+ with open(path, encoding='utf-8') as file:
+ contents = file.read()
+
+ if extension in ('.yml', '.yaml'):
+ self.check_yaml(yaml_conf, path, contents)
+ elif extension == '.py':
+ if path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/'):
+ conf = module_conf
+ else:
+ conf = plugin_conf
+
+ self.check_module(conf, path, contents)
+ else:
+ raise Exception('unsupported extension: %s' % extension)
+
+ def check_yaml(self, conf, path, contents): # type: (YamlLintConfig, str, str) -> None
+ """Check the given YAML."""
+ self.check_parsable(path, contents)
+ self.messages += [self.result_to_message(r, path) for r in linter.run(contents, conf, path)]
+
+ def check_module(self, conf, path, contents): # type: (YamlLintConfig, str, str) -> None
+ """Check the given module."""
+ docs = self.get_module_docs(path, contents)
+
+ for key, value in docs.items():
+ yaml_data = value['yaml']
+ lineno = value['lineno']
+ fmt = value['fmt']
+
+ if fmt != 'yaml':
+ continue
+
+ if yaml_data.startswith('\n'):
+ yaml_data = yaml_data[1:]
+ lineno += 1
+
+ self.check_parsable(path, yaml_data, lineno)
+
+ messages = list(linter.run(yaml_data, conf, path))
+
+ self.messages += [self.result_to_message(r, path, lineno - 1, key) for r in messages]
+
+ def check_parsable(self, path, contents, lineno=1): # type: (str, str, int) -> None
+ """Check the given contents to verify they can be parsed as YAML."""
+ try:
+ yaml.load(contents, Loader=TestLoader)
+ except MarkedYAMLError as ex:
+ self.messages += [{'code': 'unparsable-with-libyaml',
+ 'message': '%s - %s' % (ex.args[0], ex.args[2]),
+ 'path': path,
+ 'line': ex.problem_mark.line + lineno,
+ 'column': ex.problem_mark.column + 1,
+ 'level': 'error',
+ }]
+
+ @staticmethod
+ def result_to_message(result, path, line_offset=0, prefix=''): # type: (t.Any, str, int, str) -> t.Dict[str, t.Any]
+ """Convert the given result to a dictionary and return it."""
+ if prefix:
+ prefix = '%s: ' % prefix
+
+ return dict(
+ code=result.rule or result.level,
+ message=prefix + result.desc,
+ path=path,
+ line=result.line + line_offset,
+ column=result.column,
+ level=result.level,
+ )
+
+ def get_module_docs(self, path, contents): # type: (str, str) -> t.Dict[str, t.Any]
+ """Return the module documentation for the given module contents."""
+ module_doc_types = [
+ 'DOCUMENTATION',
+ 'EXAMPLES',
+ 'RETURN',
+ ]
+
+ docs = {}
+
+ fmt_re = re.compile(r'^# fmt:\s+(\S+)')
+
+ def check_assignment(statement, doc_types=None):
+ """Check the given statement for a documentation assignment."""
+ for target in statement.targets:
+ if not isinstance(target, ast.Name):
+ continue
+
+ if doc_types and target.id not in doc_types:
+ continue
+
+ fmt_match = fmt_re.match(statement.value.s.lstrip())
+ fmt = 'yaml'
+ if fmt_match:
+ fmt = fmt_match.group(1)
+
+ docs[target.id] = dict(
+ yaml=statement.value.s,
+ lineno=statement.lineno,
+ end_lineno=statement.lineno + len(statement.value.s.splitlines()),
+ fmt=fmt.lower(),
+ )
+
+ module_ast = self.parse_module(path, contents)
+
+ if not module_ast:
+ return {}
+
+ is_plugin = path.startswith('lib/ansible/modules/') or path.startswith('lib/ansible/plugins/') or path.startswith('plugins/')
+ is_doc_fragment = path.startswith('lib/ansible/plugins/doc_fragments/') or path.startswith('plugins/doc_fragments/')
+
+ if is_plugin and not is_doc_fragment:
+ for body_statement in module_ast.body:
+ if isinstance(body_statement, ast.Assign):
+ check_assignment(body_statement, module_doc_types)
+ elif is_doc_fragment:
+ for body_statement in module_ast.body:
+ if isinstance(body_statement, ast.ClassDef):
+ for class_statement in body_statement.body:
+ if isinstance(class_statement, ast.Assign):
+ check_assignment(class_statement)
+ else:
+ raise Exception('unsupported path: %s' % path)
+
+ return docs
+
+ def parse_module(self, path, contents): # type: (str, str) -> t.Optional[ast.Module]
+ """Parse the given contents and return a module if successful, otherwise return None."""
+ try:
+ return ast.parse(contents)
+ except SyntaxError as ex:
+ self.messages.append(dict(
+ code='python-syntax-error',
+ message=str(ex),
+ path=path,
+ line=ex.lineno,
+ column=ex.offset,
+ level='error',
+ ))
+ except Exception as ex: # pylint: disable=broad-except
+ self.messages.append(dict(
+ code='python-parse-error',
+ message=str(ex),
+ path=path,
+ line=0,
+ column=0,
+ level='error',
+ ))
+
+ return None
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/tools/collection_detail.py b/test/lib/ansible_test/_util/controller/tools/collection_detail.py
new file mode 100644
index 0000000..870ea59
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/tools/collection_detail.py
@@ -0,0 +1,94 @@
+"""Retrieve collection detail."""
+from __future__ import annotations
+
+import json
+import os
+import re
+import sys
+
+import yaml
+
+
+# See semantic versioning specification (https://semver.org/)
+NUMERIC_IDENTIFIER = r'(?:0|[1-9][0-9]*)'
+ALPHANUMERIC_IDENTIFIER = r'(?:[0-9]*[a-zA-Z-][a-zA-Z0-9-]*)'
+
+PRE_RELEASE_IDENTIFIER = r'(?:' + NUMERIC_IDENTIFIER + r'|' + ALPHANUMERIC_IDENTIFIER + r')'
+BUILD_IDENTIFIER = r'[a-zA-Z0-9-]+' # equivalent to r'(?:[0-9]+|' + ALPHANUMERIC_IDENTIFIER + r')'
+
+VERSION_CORE = NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER
+PRE_RELEASE = r'(?:-' + PRE_RELEASE_IDENTIFIER + r'(?:\.' + PRE_RELEASE_IDENTIFIER + r')*)?'
+BUILD = r'(?:\+' + BUILD_IDENTIFIER + r'(?:\.' + BUILD_IDENTIFIER + r')*)?'
+
+SEMVER_REGULAR_EXPRESSION = r'^' + VERSION_CORE + PRE_RELEASE + BUILD + r'$'
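+
+# Illustrative matches for the assembled pattern: '1.2.3', '1.0.0-beta.1' and
+# '2.0.0+build.7' are accepted, while '1.2' and '01.2.3' are rejected.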
+
+
+def validate_version(version):
+ """Raise exception if the provided version is not None or a valid semantic version."""
+ if version is None:
+ return
+ if not re.match(SEMVER_REGULAR_EXPRESSION, version):
+ raise Exception('Invalid version number "{0}". Collection version numbers must '
+ 'follow semantic versioning (https://semver.org/).'.format(version))
+
+
+def read_manifest_json(collection_path):
+ """Return collection information from the MANIFEST.json file."""
+ manifest_path = os.path.join(collection_path, 'MANIFEST.json')
+
+ if not os.path.exists(manifest_path):
+ return None
+
+ try:
+ with open(manifest_path, encoding='utf-8') as manifest_file:
+ manifest = json.load(manifest_file)
+
+ collection_info = manifest.get('collection_info') or {}
+
+ result = dict(
+ version=collection_info.get('version'),
+ )
+ validate_version(result['version'])
+ except Exception as ex: # pylint: disable=broad-except
+ raise Exception('{0}: {1}'.format(os.path.basename(manifest_path), ex))
+
+ return result
+
+
+def read_galaxy_yml(collection_path):
+ """Return collection information from the galaxy.yml file."""
+ galaxy_path = os.path.join(collection_path, 'galaxy.yml')
+
+ if not os.path.exists(galaxy_path):
+ return None
+
+ try:
+ with open(galaxy_path, encoding='utf-8') as galaxy_file:
+ galaxy = yaml.safe_load(galaxy_file)
+
+ result = dict(
+ version=galaxy.get('version'),
+ )
+ validate_version(result['version'])
+ except Exception as ex: # pylint: disable=broad-except
+ raise Exception('{0}: {1}'.format(os.path.basename(galaxy_path), ex))
+
+ return result
+
+
+def main():
+ """Retrieve collection detail."""
+ collection_path = sys.argv[1]
+
+ try:
+ result = read_manifest_json(collection_path) or read_galaxy_yml(collection_path) or {}
+ except Exception as ex: # pylint: disable=broad-except
+ result = dict(
+ error='{0}'.format(ex),
+ )
+
+ print(json.dumps(result))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/tools/coverage_stub.ps1 b/test/lib/ansible_test/_util/controller/tools/coverage_stub.ps1
new file mode 100644
index 0000000..fcc4570
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/tools/coverage_stub.ps1
@@ -0,0 +1,40 @@
+<#
+.SYNOPSIS
+Gets the lines to hit from a source file for coverage stubs.
+#>
+[CmdletBinding()]
+param (
+ [Parameter(Mandatory, ValueFromRemainingArguments)]
+ [String[]]
+ $Path
+)
+
+$stubInfo = @(
+ foreach ($sourcePath in $Path) {
+        # Default is to report no lines for missing files
+ [Collections.Generic.HashSet[int]]$lines = @()
+
+ if (Test-Path -LiteralPath $sourcePath) {
+ $code = [ScriptBlock]::Create([IO.File]::ReadAllText($sourcePath))
+
+ # We set our breakpoints with this predicate so our stubs should match
+ # that logic.
+ $predicate = {
+ $args[0] -is [System.Management.Automation.Language.CommandBaseAst]
+ }
+ $cmds = $code.Ast.FindAll($predicate, $true)
+
+            # We only care about unique lines, not multiple commands on one line.
+ $lines = @(foreach ($cmd in $cmds) {
+ $cmd.Extent.StartLineNumber
+ })
+ }
+
+ [PSCustomObject]@{
+ Path = $sourcePath
+ Lines = $lines
+ }
+ }
+)
+
+ConvertTo-Json -InputObject $stubInfo -Depth 2 -Compress
diff --git a/test/lib/ansible_test/_util/controller/tools/sslcheck.py b/test/lib/ansible_test/_util/controller/tools/sslcheck.py
new file mode 100644
index 0000000..c25fed6
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/tools/sslcheck.py
@@ -0,0 +1,22 @@
+"""Show openssl version."""
+from __future__ import annotations
+
+import json
+
+# noinspection PyBroadException
+try:
+ from ssl import OPENSSL_VERSION_INFO
+ VERSION = list(OPENSSL_VERSION_INFO[:3])
+except Exception: # pylint: disable=broad-except
+ VERSION = None
+
+
+def main():
+ """Main program entry point."""
+ print(json.dumps(dict(
+ version=VERSION,
+ )))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/controller/tools/yaml_to_json.py b/test/lib/ansible_test/_util/controller/tools/yaml_to_json.py
new file mode 100644
index 0000000..e2a15bf
--- /dev/null
+++ b/test/lib/ansible_test/_util/controller/tools/yaml_to_json.py
@@ -0,0 +1,27 @@
+"""Read YAML from stdin and write JSON to stdout."""
+from __future__ import annotations
+
+import datetime
+import json
+import sys
+
+from yaml import load
+
+try:
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeLoader
+
+# unique ISO date marker matching the one present in importer.py
+ISO_DATE_MARKER = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:'
+
+
+def default(value):
+ """Custom default serializer which supports datetime.date types."""
+ if isinstance(value, datetime.date):
+ return '%s%s' % (ISO_DATE_MARKER, value.isoformat())
+
+ raise TypeError('cannot serialize type: %s' % type(value))
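+
+# For example (illustrative): default(datetime.date(2023, 1, 2)) returns
+# ISO_DATE_MARKER + '2023-01-02'.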
+
+
+json.dump(load(sys.stdin, Loader=SafeLoader), sys.stdout, default=default)
diff --git a/test/lib/ansible_test/_util/target/__init__.py b/test/lib/ansible_test/_util/target/__init__.py
new file mode 100644
index 0000000..527d413
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/__init__.py
@@ -0,0 +1,2 @@
+# Empty __init__.py to allow importing of `ansible_test._util.target.common` under Python 2.x.
+# This allows the ansible-test entry point to report supported Python versions before exiting.
diff --git a/test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py b/test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py
new file mode 100755
index 0000000..930654f
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/cli/ansible_test_cli_stub.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# PYTHON_ARGCOMPLETE_OK
+"""Command line entry point for ansible-test."""
+
+# NOTE: This file resides in the _util/target directory to ensure compatibility with all supported Python versions.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main(args=None):
+ """Main program entry point."""
+ ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ source_root = os.path.join(ansible_root, 'test', 'lib')
+
+ if os.path.exists(os.path.join(source_root, 'ansible_test', '_internal', '__init__.py')):
+ # running from source, use that version of ansible-test instead of any version that may already be installed
+ sys.path.insert(0, source_root)
+
+ # noinspection PyProtectedMember
+ from ansible_test._util.target.common.constants import CONTROLLER_PYTHON_VERSIONS
+
+ if version_to_str(sys.version_info[:2]) not in CONTROLLER_PYTHON_VERSIONS:
+ raise SystemExit('This version of ansible-test cannot be executed with Python version %s. Supported Python versions are: %s' % (
+ version_to_str(sys.version_info[:3]), ', '.join(CONTROLLER_PYTHON_VERSIONS)))
+
+ if any(not os.get_blocking(handle.fileno()) for handle in (sys.stdin, sys.stdout, sys.stderr)):
+ raise SystemExit('Standard input, output and error file handles must be blocking to run ansible-test.')
+
+ # noinspection PyProtectedMember
+ from ansible_test._internal import main as cli_main
+
+ cli_main(args)
+
+
+def version_to_str(version):
+ """Return a version string from a version tuple."""
+ return '.'.join(str(n) for n in version)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/target/common/__init__.py b/test/lib/ansible_test/_util/target/common/__init__.py
new file mode 100644
index 0000000..527d413
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/common/__init__.py
@@ -0,0 +1,2 @@
+# Empty __init__.py to allow importing of `ansible_test._util.target.common` under Python 2.x.
+# This allows the ansible-test entry point to report supported Python versions before exiting.
diff --git a/test/lib/ansible_test/_util/target/common/constants.py b/test/lib/ansible_test/_util/target/common/constants.py
new file mode 100644
index 0000000..9bddfaf
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/common/constants.py
@@ -0,0 +1,20 @@
+"""Constants used by ansible-test's CLI entry point (as well as the rest of ansible-test). Imports should not be used in this file."""
+
+# NOTE: This file resides in the _util/target directory to ensure compatibility with all supported Python versions.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+REMOTE_ONLY_PYTHON_VERSIONS = (
+ '2.7',
+ '3.5',
+ '3.6',
+ '3.7',
+ '3.8',
+)
+
+CONTROLLER_PYTHON_VERSIONS = (
+ '3.9',
+ '3.10',
+ '3.11',
+)
diff --git a/test/lib/ansible_test/_util/target/injector/python.py b/test/lib/ansible_test/_util/target/injector/python.py
new file mode 100644
index 0000000..c1e88a9
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/injector/python.py
@@ -0,0 +1,83 @@
+# auto-shebang
+"""Provides an entry point for python scripts and python modules on the controller with the current python interpreter and optional code coverage collection."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ name = os.path.basename(__file__)
+ args = [sys.executable]
+
+ coverage_config = os.environ.get('COVERAGE_CONF')
+ coverage_output = os.environ.get('COVERAGE_FILE')
+
+ if coverage_config:
+ if coverage_output:
+ args += ['-m', 'coverage.__main__', 'run', '--rcfile', coverage_config]
+ else:
+ if sys.version_info >= (3, 4):
+ # noinspection PyUnresolvedReferences
+ import importlib.util
+
+ # noinspection PyUnresolvedReferences
+ found = bool(importlib.util.find_spec('coverage'))
+ else:
+ # noinspection PyDeprecation
+ import imp
+
+ try:
+ # noinspection PyDeprecation
+ imp.find_module('coverage')
+ found = True
+ except ImportError:
+ found = False
+
+ if not found:
+ sys.exit('ERROR: Could not find `coverage` module. '
+ 'Did you use a virtualenv created without --system-site-packages or with the wrong interpreter?')
+
+ if name == 'python.py':
+ if sys.argv[1] == '-c':
+ # prevent simple misuse of python.py with -c which does not work with coverage
+ sys.exit('ERROR: Use `python -c` instead of `python.py -c` to avoid errors when code coverage is collected.')
+ elif name == 'pytest':
+ args += ['-m', 'pytest']
+ elif name == 'importer.py':
+ args += [find_program(name, False)]
+ else:
+ args += [find_program(name, True)]
+
+ args += sys.argv[1:]
+
+ os.execv(args[0], args)
+
+
+def find_program(name, executable): # type: (str, bool) -> str
+ """
+ Find and return the full path to the named program, optionally requiring it to be executable.
+ Raises an exception if the program is not found.
+ """
+ path = os.environ.get('PATH', os.path.defpath)
+ seen = set([os.path.abspath(__file__)])
+ mode = os.F_OK | os.X_OK if executable else os.F_OK
+
+ for base in path.split(os.path.pathsep):
+ candidate = os.path.abspath(os.path.join(base, name))
+
+ if candidate in seen:
+ continue
+
+ seen.add(candidate)
+
+ if os.path.exists(candidate) and os.access(candidate, mode):
+ return candidate
+
+ raise Exception('Executable "%s" not found in path: %s' % (name, path))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/target/injector/virtualenv.sh b/test/lib/ansible_test/_util/target/injector/virtualenv.sh
new file mode 100644
index 0000000..5dcbe0e
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/injector/virtualenv.sh
@@ -0,0 +1,14 @@
+# shellcheck shell=bash
+# Create and activate a fresh virtual environment with `source virtualenv.sh`.
+
+rm -rf "${OUTPUT_DIR}/venv"
+
+# Try to use 'venv' if it is available, then fall back to 'virtualenv', since some systems provide a non-functional 'venv'.
+if [[ "${ANSIBLE_TEST_PYTHON_VERSION}" =~ ^2\. ]] || ! "${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m venv --system-site-packages "${OUTPUT_DIR}/venv" > /dev/null 2>&1; then
+ rm -rf "${OUTPUT_DIR}/venv"
+ "${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m virtualenv --system-site-packages --python "${ANSIBLE_TEST_PYTHON_INTERPRETER}" "${OUTPUT_DIR}/venv"
+fi
+
+set +ux
+source "${OUTPUT_DIR}/venv/bin/activate"
+set -ux
diff --git a/test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_collections.py b/test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_collections.py
new file mode 100644
index 0000000..fefd6b0
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_collections.py
@@ -0,0 +1,70 @@
+"""Enable unit testing of Ansible collections. PYTEST_DONT_REWRITE"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+# set by ansible-test to a single directory, rather than a list of directories as supported by Ansible itself
+ANSIBLE_COLLECTIONS_PATH = os.path.join(os.environ['ANSIBLE_COLLECTIONS_PATH'], 'ansible_collections')
+
+# set by ansible-test to the minimum python version supported on the controller
+ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION = tuple(int(x) for x in os.environ['ANSIBLE_CONTROLLER_MIN_PYTHON_VERSION'].split('.'))
+
+
+# this monkeypatch to _pytest.pathlib.resolve_package_path fixes PEP420 resolution for collections in pytest >= 6.0.0
+# NB: this code should never run under py2
+def collection_resolve_package_path(path):
+ """Configure the Python package path so that pytest can find our collections."""
+ for parent in path.parents:
+ if str(parent) == ANSIBLE_COLLECTIONS_PATH:
+ return parent
+
+ raise Exception('File "%s" not found in collection path "%s".' % (path, ANSIBLE_COLLECTIONS_PATH))
+
+
+# this monkeypatch to py.path.local.LocalPath.pypkgpath fixes PEP420 resolution for collections in pytest < 6.0.0
+def collection_pypkgpath(self):
+ """Configure the Python package path so that pytest can find our collections."""
+ for parent in self.parts(reverse=True):
+ if str(parent) == ANSIBLE_COLLECTIONS_PATH:
+ return parent
+
+ raise Exception('File "%s" not found in collection path "%s".' % (self.strpath, ANSIBLE_COLLECTIONS_PATH))
+
+
+def pytest_configure():
+ """Configure this pytest plugin."""
+ try:
+ if pytest_configure.executed:
+ return
+ except AttributeError:
+ pytest_configure.executed = True
+
+ # noinspection PyProtectedMember
+ from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
+
+ # allow unit tests to import code from collections
+
+ # noinspection PyProtectedMember
+ _AnsibleCollectionFinder(paths=[os.path.dirname(ANSIBLE_COLLECTIONS_PATH)])._install() # pylint: disable=protected-access
+
+ try:
+ # noinspection PyProtectedMember
+ from _pytest import pathlib as _pytest_pathlib
+ except ImportError:
+ _pytest_pathlib = None
+
+ if hasattr(_pytest_pathlib, 'resolve_package_path'):
+ _pytest_pathlib.resolve_package_path = collection_resolve_package_path
+ else:
+ # looks like pytest < 6.0.0, use the old hack against py.path
+ # noinspection PyProtectedMember
+ import py._path.local
+
+ # force collections unit tests to be loaded with the ansible_collections namespace
+ # original idea from https://stackoverflow.com/questions/50174130/how-do-i-pytest-a-project-using-pep-420-namespace-packages/50175552#50175552
+ # noinspection PyProtectedMember
+ py._path.local.LocalPath.pypkgpath = collection_pypkgpath # pylint: disable=protected-access
+
+
+pytest_configure()
diff --git a/test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_coverage.py b/test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_coverage.py
new file mode 100644
index 0000000..b05298a
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/pytest/plugins/ansible_pytest_coverage.py
@@ -0,0 +1,68 @@
+"""Monkey patch os._exit when running under coverage so we don't lose coverage data in forks, such as with `pytest --boxed`. PYTEST_DONT_REWRITE"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def pytest_configure():
+ """Configure this pytest plugin."""
+ try:
+ if pytest_configure.executed:
+ return
+ except AttributeError:
+ pytest_configure.executed = True
+
+ try:
+ import coverage
+ except ImportError:
+ coverage = None
+
+ try:
+ coverage.Coverage
+ except AttributeError:
+ coverage = None
+
+ if not coverage:
+ return
+
+ import gc
+ import os
+
+ coverage_instances = []
+
+ for obj in gc.get_objects():
+ if isinstance(obj, coverage.Coverage):
+ coverage_instances.append(obj)
+
+ if not coverage_instances:
+ coverage_config = os.environ.get('COVERAGE_CONF')
+
+ if not coverage_config:
+ return
+
+ coverage_output = os.environ.get('COVERAGE_FILE')
+
+ if not coverage_output:
+ return
+
+ cov = coverage.Coverage(config_file=coverage_config)
+ coverage_instances.append(cov)
+ else:
+ cov = None
+
+ # noinspection PyProtectedMember
+ os_exit = os._exit # pylint: disable=protected-access
+
+ def coverage_exit(*args, **kwargs):
+ for instance in coverage_instances:
+ instance.stop()
+ instance.save()
+
+ os_exit(*args, **kwargs)
+
+ os._exit = coverage_exit # pylint: disable=protected-access
+
+ if cov:
+ cov.start()
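+
+ # illustrative: with os._exit patched above, a forked pytest worker (such as
+ # one created by `pytest --boxed`) that exits via os._exit(0) saves its
+ # coverage data first instead of silently discarding it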
+
+
+pytest_configure()
diff --git a/test/lib/ansible_test/_util/target/sanity/compile/compile.py b/test/lib/ansible_test/_util/target/sanity/compile/compile.py
new file mode 100644
index 0000000..bd2446f
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/sanity/compile/compile.py
@@ -0,0 +1,56 @@
+"""Python syntax checker with lint friendly output."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+ENCODING = 'utf-8'
+ERRORS = 'replace'
+Text = type(u'')
+
+
+def main():
+ """Main program entry point."""
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ compile_source(path)
+
+
+def compile_source(path):
+ """Compile the specified source file, printing an error if one occurs."""
+ with open(path, 'rb') as source_fd:
+ source = source_fd.read()
+
+ try:
+ compile(source, path, 'exec', dont_inherit=True)
+ except SyntaxError as ex:
+ extype, message, lineno, offset = type(ex), ex.text, ex.lineno, ex.offset
+ except BaseException as ex: # pylint: disable=broad-except
+ extype, message, lineno, offset = type(ex), str(ex), 0, 0
+ else:
+ return
+
+ # In some situations offset can be None. This can happen for syntax errors on Python 2.6
+ # (a __future__ import that follows a regular import).
+ offset = offset or 0
+
+ result = "%s:%d:%d: %s: %s" % (path, lineno, offset, extype.__name__, safe_message(message))
+
+ if sys.version_info <= (3,):
+ result = result.encode(ENCODING, ERRORS)
+
+ print(result)
+
+
+def safe_message(value):
+ """Given an input value as text or bytes, return the first non-empty line as text, ensuring it can be round-tripped as UTF-8."""
+ if isinstance(value, Text):
+ value = value.encode(ENCODING, ERRORS)
+
+ value = value.decode(ENCODING, ERRORS)
+ value = value.strip().splitlines()[0].strip()
+
+ return value
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/target/sanity/import/importer.py b/test/lib/ansible_test/_util/target/sanity/import/importer.py
new file mode 100644
index 0000000..44a5ddc
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/sanity/import/importer.py
@@ -0,0 +1,573 @@
+"""Import the given python module(s) and report error(s) encountered."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def main():
+ """
+ Main program function used to isolate globals from imported code.
+ Changes to globals in imported modules on Python 2.x will overwrite our own globals.
+ """
+ import os
+ import sys
+ import types
+
+ # preload an empty ansible._vendor module to prevent use of any embedded modules during the import test
+ vendor_module_name = 'ansible._vendor'
+
+ vendor_module = types.ModuleType(vendor_module_name)
+ vendor_module.__file__ = os.path.join(os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-8]), 'lib/ansible/_vendor/__init__.py')
+ vendor_module.__path__ = []
+ vendor_module.__package__ = vendor_module_name
+
+ sys.modules[vendor_module_name] = vendor_module
+
+ import ansible
+ import contextlib
+ import datetime
+ import json
+ import re
+ import runpy
+ import subprocess
+ import traceback
+ import warnings
+
+ ansible_path = os.path.dirname(os.path.dirname(ansible.__file__))
+ temp_path = os.environ['SANITY_TEMP_PATH'] + os.path.sep
+ external_python = os.environ.get('SANITY_EXTERNAL_PYTHON')
+ yaml_to_json_path = os.environ.get('SANITY_YAML_TO_JSON')
+ collection_full_name = os.environ.get('SANITY_COLLECTION_FULL_NAME')
+ collection_root = os.environ.get('ANSIBLE_COLLECTIONS_PATH')
+ import_type = os.environ.get('SANITY_IMPORTER_TYPE')
+
+ try:
+ # noinspection PyCompatibility
+ from importlib import import_module
+ except ImportError:
+ def import_module(name, package=None): # type: (str, str | None) -> types.ModuleType
+ assert package is None
+ __import__(name)
+ return sys.modules[name]
+
+ from io import BytesIO, TextIOWrapper
+
+ try:
+ from importlib.util import spec_from_loader, module_from_spec
+ from importlib.machinery import SourceFileLoader, ModuleSpec # pylint: disable=unused-import
+ except ImportError:
+ has_py3_loader = False
+ else:
+ has_py3_loader = True
+
+ if collection_full_name:
+ # allow importing code from collections when testing a collection
+ from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native, text_type
+
+ # noinspection PyProtectedMember
+ from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
+ from ansible.utils.collection_loader import _collection_finder
+
+ yaml_to_dict_cache = {}
+
+ # unique ISO date marker matching the one present in yaml_to_json.py
+ iso_date_marker = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:'
+ iso_date_re = re.compile('^%s([0-9]{4})-([0-9]{2})-([0-9]{2})$' % iso_date_marker)
+
+ def parse_value(value):
+ """Custom value parser for JSON deserialization that recognizes our internal ISO date format."""
+ if isinstance(value, text_type):
+ match = iso_date_re.search(value)
+
+ if match:
+ value = datetime.date(int(match.group(1)), int(match.group(2)), int(match.group(3)))
+
+ return value
+
+ def object_hook(data):
+ """Object hook for custom ISO date deserialization from JSON."""
+ return dict((key, parse_value(value)) for key, value in data.items())
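+
+ # illustrative (hypothetical value): the yaml_to_json.py helper emits dates as
+ # 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:2020-01-01', which parse_value
+ # converts back into datetime.date(2020, 1, 1) during json.loads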
+
+ def yaml_to_dict(yaml, content_id):
+ """
+ Return a Python dict version of the provided YAML.
+ Conversion is done in a subprocess since the current Python interpreter does not have access to PyYAML.
+ """
+ if content_id in yaml_to_dict_cache:
+ return yaml_to_dict_cache[content_id]
+
+ try:
+ cmd = [external_python, yaml_to_json_path]
+ proc = subprocess.Popen([to_bytes(c) for c in cmd], # pylint: disable=consider-using-with
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout_bytes, stderr_bytes = proc.communicate(to_bytes(yaml))
+
+ if proc.returncode != 0:
+ raise Exception('command %s failed with return code %d: %s' % ([to_native(c) for c in cmd], proc.returncode, to_native(stderr_bytes)))
+
+ data = yaml_to_dict_cache[content_id] = json.loads(to_text(stdout_bytes), object_hook=object_hook)
+
+ return data
+ except Exception as ex:
+ raise Exception('internal importer error - failed to parse yaml: %s' % to_native(ex))
+
+ _collection_finder._meta_yml_to_dict = yaml_to_dict # pylint: disable=protected-access
+
+ collection_loader = _AnsibleCollectionFinder(paths=[collection_root])
+ # noinspection PyProtectedMember
+ collection_loader._install() # pylint: disable=protected-access
+ else:
+ # do not support collection loading when not testing a collection
+ collection_loader = None
+
+ if collection_loader and import_type == 'plugin':
+ # do not unload ansible code for collection plugin (not module) tests
+ # doing so could result in the collection loader being initialized multiple times
+ pass
+ else:
+ # remove all modules under the ansible package, except the preloaded vendor module
+ list(map(sys.modules.pop, [m for m in sys.modules if m.partition('.')[0] == ansible.__name__ and m != vendor_module_name]))
+
+ if import_type == 'module':
+ # pre-load an empty ansible package to prevent unwanted code in __init__.py from loading
+ # this more accurately reflects the environment that AnsiballZ runs modules under
+ # it also avoids issues with imports in the ansible package that are not allowed
+ ansible_module = types.ModuleType(ansible.__name__)
+ ansible_module.__file__ = ansible.__file__
+ ansible_module.__path__ = ansible.__path__
+ ansible_module.__package__ = ansible.__package__
+
+ sys.modules[ansible.__name__] = ansible_module
+
+ class ImporterAnsibleModuleException(Exception):
+ """Exception thrown during initialization of ImporterAnsibleModule."""
+
+ class ImporterAnsibleModule:
+ """Replacement for AnsibleModule to support import testing."""
+ def __init__(self, *args, **kwargs):
+ raise ImporterAnsibleModuleException()
+
+ class RestrictedModuleLoader:
+ """Python module loader that restricts inappropriate imports."""
+ def __init__(self, path, name, restrict_to_module_paths):
+ self.path = path
+ self.name = name
+ self.loaded_modules = set()
+ self.restrict_to_module_paths = restrict_to_module_paths
+
+ def find_spec(self, fullname, path=None, target=None): # pylint: disable=unused-argument
+ # type: (RestrictedModuleLoader, str, list[str], types.ModuleType | None ) -> ModuleSpec | None | ImportError
+ """Return the spec from the loader or None"""
+ loader = self._get_loader(fullname, path=path)
+ if loader is not None:
+ if has_py3_loader:
+ # loader is expected to be Optional[importlib.abc.Loader], but RestrictedModuleLoader does not inherit from importlib.abc.Loader
+ return spec_from_loader(fullname, loader) # type: ignore[arg-type]
+ raise ImportError("Failed to import '%s' due to a bug in ansible-test. Check importlib imports for typos." % fullname)
+ return None
+
+ def find_module(self, fullname, path=None):
+ # type: (RestrictedModuleLoader, str, list[str]) -> RestrictedModuleLoader | None
+ """Return self if the given fullname is restricted, otherwise return None."""
+ return self._get_loader(fullname, path=path)
+
+ def _get_loader(self, fullname, path=None):
+ # type: (RestrictedModuleLoader, str, list[str]) -> RestrictedModuleLoader | None
+ """Return self if the given fullname is restricted, otherwise return None."""
+ if fullname in self.loaded_modules:
+ return None # ignore modules that are already being loaded
+
+ if is_name_in_namespace(fullname, ['ansible']):
+ if not self.restrict_to_module_paths:
+ return None # for non-modules, everything in the ansible namespace is allowed
+
+ if fullname in ('ansible.module_utils.basic',):
+ return self # intercept loading so we can modify the result
+
+ if is_name_in_namespace(fullname, ['ansible.module_utils', self.name]):
+ return None # module_utils and module under test are always allowed
+
+ if any(os.path.exists(candidate_path) for candidate_path in convert_ansible_name_to_absolute_paths(fullname)):
+ return self # restrict access to ansible files that exist
+
+ return None # ansible file does not exist, do not restrict access
+
+ if is_name_in_namespace(fullname, ['ansible_collections']):
+ if not collection_loader:
+ return self # restrict access to collections when we are not testing a collection
+
+ if not self.restrict_to_module_paths:
+ return None # for non-modules, everything in the ansible namespace is allowed
+
+ if is_name_in_namespace(fullname, ['ansible_collections...plugins.module_utils', self.name]):
+ return None # module_utils and module under test are always allowed
+
+ if collection_loader.find_module(fullname, path):
+ return self # restrict access to collection files that exist
+
+ return None # collection file does not exist, do not restrict access
+
+ # not a namespace we care about
+ return None
+
+ def create_module(self, spec): # pylint: disable=unused-argument
+ # type: (RestrictedModuleLoader, ModuleSpec) -> None
+ """Return None to use default module creation."""
+ return None
+
+ def exec_module(self, module):
+ # type: (RestrictedModuleLoader, types.ModuleType) -> None | ImportError
+ """Execute the module if the name is ansible.module_utils.basic and otherwise raise an ImportError"""
+ fullname = module.__spec__.name
+ if fullname == 'ansible.module_utils.basic':
+ self.loaded_modules.add(fullname)
+ for path in convert_ansible_name_to_absolute_paths(fullname):
+ if not os.path.exists(path):
+ continue
+ loader = SourceFileLoader(fullname, path)
+ spec = spec_from_loader(fullname, loader)
+ real_module = module_from_spec(spec)
+ loader.exec_module(real_module)
+ real_module.AnsibleModule = ImporterAnsibleModule # type: ignore[attr-defined]
+ real_module._load_params = lambda *args, **kwargs: {} # type: ignore[attr-defined] # pylint: disable=protected-access
+ sys.modules[fullname] = real_module
+ return None
+ raise ImportError('could not find "%s"' % fullname)
+ raise ImportError('import of "%s" is not allowed in this context' % fullname)
+
+ def load_module(self, fullname):
+ # type: (RestrictedModuleLoader, str) -> types.ModuleType | ImportError
+ """Return the module if the name is ansible.module_utils.basic and otherwise raise an ImportError."""
+ if fullname == 'ansible.module_utils.basic':
+ module = self.__load_module(fullname)
+
+ # stop Ansible module execution during AnsibleModule instantiation
+ module.AnsibleModule = ImporterAnsibleModule # type: ignore[attr-defined]
+ # no-op for _load_params since it may be called before instantiating AnsibleModule
+ module._load_params = lambda *args, **kwargs: {} # type: ignore[attr-defined] # pylint: disable=protected-access
+
+ return module
+
+ raise ImportError('import of "%s" is not allowed in this context' % fullname)
+
+ def __load_module(self, fullname):
+ # type: (RestrictedModuleLoader, str) -> types.ModuleType
+ """Load the requested module while avoiding infinite recursion."""
+ self.loaded_modules.add(fullname)
+ return import_module(fullname)
+
+ def run(restrict_to_module_paths):
+ """Main program function."""
+ base_dir = os.getcwd()
+ messages = set()
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ name = convert_relative_path_to_name(path)
+ test_python_module(path, name, base_dir, messages, restrict_to_module_paths)
+
+ if messages:
+ sys.exit(10)
+
+ def test_python_module(path, name, base_dir, messages, restrict_to_module_paths):
+ """Test the given python module by importing it.
+ :type path: str
+ :type name: str
+ :type base_dir: str
+ :type messages: set[str]
+ :type restrict_to_module_paths: bool
+ """
+ if name in sys.modules:
+ return # cannot be tested because it has already been loaded
+
+ is_ansible_module = (path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/')) and os.path.basename(path) != '__init__.py'
+ run_main = is_ansible_module
+
+ if path == 'lib/ansible/modules/async_wrapper.py':
+ # async_wrapper is a non-standard Ansible module (does not use AnsibleModule) so we cannot test the main function
+ run_main = False
+
+ capture_normal = Capture()
+ capture_main = Capture()
+
+ run_module_ok = False
+
+ try:
+ with monitor_sys_modules(path, messages):
+ with restrict_imports(path, name, messages, restrict_to_module_paths):
+ with capture_output(capture_normal):
+ import_module(name)
+
+ if run_main:
+ run_module_ok = is_ansible_module
+
+ with monitor_sys_modules(path, messages):
+ with restrict_imports(path, name, messages, restrict_to_module_paths):
+ with capture_output(capture_main):
+ runpy.run_module(name, run_name='__main__', alter_sys=True)
+ except ImporterAnsibleModuleException:
+ # module instantiated AnsibleModule without raising an exception
+ if not run_module_ok:
+ if is_ansible_module:
+ report_message(path, 0, 0, 'module-guard', "AnsibleModule instantiation not guarded by `if __name__ == '__main__'`", messages)
+ else:
+ report_message(path, 0, 0, 'non-module', "AnsibleModule instantiated by import of non-module", messages)
+ except BaseException as ex: # pylint: disable=locally-disabled, broad-except
+ # intentionally catch all exceptions, including calls to sys.exit
+ exc_type, _exc, exc_tb = sys.exc_info()
+ message = str(ex)
+ results = list(reversed(traceback.extract_tb(exc_tb)))
+ line = 0
+ offset = 0
+ full_path = os.path.join(base_dir, path)
+ base_path = base_dir + os.path.sep
+ source = None
+
+ # avoid line wraps in messages
+ message = re.sub(r'\n *', ': ', message)
+
+ for result in results:
+ if result[0] == full_path:
+ # save the line number for the file under test
+ line = result[1] or 0
+
+ if not source and result[0].startswith(base_path) and not result[0].startswith(temp_path):
+ # save the first path and line number in the traceback which is in our source tree
+ source = (os.path.relpath(result[0], base_path), result[1] or 0, 0)
+
+ if isinstance(ex, SyntaxError):
+ # SyntaxError has better information than the traceback
+ if ex.filename == full_path: # pylint: disable=locally-disabled, no-member
+ # syntax error was reported in the file under test
+ line = ex.lineno or 0 # pylint: disable=locally-disabled, no-member
+ offset = ex.offset or 0 # pylint: disable=locally-disabled, no-member
+ elif ex.filename.startswith(base_path) and not ex.filename.startswith(temp_path): # pylint: disable=locally-disabled, no-member
+ # syntax error was reported in our source tree
+ source = (os.path.relpath(ex.filename, base_path), ex.lineno or 0, ex.offset or 0) # pylint: disable=locally-disabled, no-member
+
+ # remove the filename and line number from the message
+ # either it was extracted above, or it's not really useful information
+ message = re.sub(r' \(.*?, line [0-9]+\)$', '', message)
+
+ if source and source[0] != path:
+ message += ' (at %s:%d:%d)' % (source[0], source[1], source[2])
+
+ report_message(path, line, offset, 'traceback', '%s: %s' % (exc_type.__name__, message), messages)
+ finally:
+ capture_report(path, capture_normal, messages)
+ capture_report(path, capture_main, messages)
+
+ def is_name_in_namespace(name, namespaces):
+ """Return True if the given name is in one of the given namespaces, otherwise return False."""
+ name_parts = name.split('.')
+
+ for namespace in namespaces:
+ namespace_parts = namespace.split('.')
+ length = min(len(name_parts), len(namespace_parts))
+
+ truncated_name = name_parts[0:length]
+ truncated_namespace = namespace_parts[0:length]
+
+ # empty parts in the namespace are treated as wildcards
+ # to simplify the comparison, use those empty parts to indicate the positions in the name to be empty as well
+ for idx, part in enumerate(truncated_namespace):
+ if not part:
+ truncated_name[idx] = part
+
+ # example: name=ansible, allowed_name=ansible.module_utils
+ # example: name=ansible.module_utils.system.ping, allowed_name=ansible.module_utils
+ if truncated_name == truncated_namespace:
+ return True
+
+ return False
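+
+ # illustrative: empty parts act as wildcards, so the namespace
+ # 'ansible_collections...plugins.module_utils' matches names such as
+ # 'ansible_collections.ns.col.plugins.module_utils.anything'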
+
+ def check_sys_modules(path, before, messages):
+ """Check for unwanted changes to sys.modules.
+ :type path: str
+ :type before: dict[str, module]
+ :type messages: set[str]
+ """
+ after = sys.modules
+ removed = set(before.keys()) - set(after.keys())
+ changed = set(key for key, value in before.items() if key in after and value != after[key])
+
+ # additions are checked by our custom PEP 302 loader, so we don't need to check them again here
+
+ for module in sorted(removed):
+ report_message(path, 0, 0, 'unload', 'unloading of "%s" in sys.modules is not supported' % module, messages)
+
+ for module in sorted(changed):
+ report_message(path, 0, 0, 'reload', 'reloading of "%s" in sys.modules is not supported' % module, messages)
+
+ def convert_ansible_name_to_absolute_paths(name):
+ """Calculate the module path from the given name.
+ :type name: str
+ :rtype: list[str]
+ """
+ return [
+ os.path.join(ansible_path, name.replace('.', os.path.sep)),
+ os.path.join(ansible_path, name.replace('.', os.path.sep)) + '.py',
+ ]
+
+ def convert_relative_path_to_name(path):
+ """Calculate the module name from the given path.
+ :type path: str
+ :rtype: str
+ """
+ if path.endswith('/__init__.py'):
+ clean_path = os.path.dirname(path)
+ else:
+ clean_path = path
+
+ clean_path = os.path.splitext(clean_path)[0]
+
+ name = clean_path.replace(os.path.sep, '.')
+
+ if collection_loader:
+ # when testing collections the relative paths (and names) being tested are within the collection under test
+ name = 'ansible_collections.%s.%s' % (collection_full_name, name)
+ else:
+ # when testing ansible all files being imported reside under the lib directory
+ name = name[len('lib/'):]
+
+ return name
+
+ class Capture:
+ """Captured output and/or exception."""
+ def __init__(self):
+ # use buffered IO to simulate StringIO; allows Ansible's stream patching to behave without warnings
+ self.stdout = TextIOWrapper(BytesIO())
+ self.stderr = TextIOWrapper(BytesIO())
+
+ def capture_report(path, capture, messages):
+ """Report on captured output.
+ :type path: str
+ :type capture: Capture
+ :type messages: set[str]
+ """
+ # since we're using buffered IO, flush before checking for data
+ capture.stdout.flush()
+ capture.stderr.flush()
+ stdout_value = capture.stdout.buffer.getvalue()
+ if stdout_value:
+ first = stdout_value.decode().strip().splitlines()[0].strip()
+ report_message(path, 0, 0, 'stdout', first, messages)
+
+ stderr_value = capture.stderr.buffer.getvalue()
+ if stderr_value:
+ first = stderr_value.decode().strip().splitlines()[0].strip()
+ report_message(path, 0, 0, 'stderr', first, messages)
+
+ def report_message(path, line, column, code, message, messages):
+ """Report message if not already reported.
+ :type path: str
+ :type line: int
+ :type column: int
+ :type code: str
+ :type message: str
+ :type messages: set[str]
+ """
+ message = '%s:%d:%d: %s: %s' % (path, line, column, code, message)
+
+ if message not in messages:
+ messages.add(message)
+ print(message)
+
+ @contextlib.contextmanager
+ def restrict_imports(path, name, messages, restrict_to_module_paths):
+ """Restrict available imports.
+ :type path: str
+ :type name: str
+ :type messages: set[str]
+ :type restrict_to_module_paths: bool
+ """
+ restricted_loader = RestrictedModuleLoader(path, name, restrict_to_module_paths)
+
+ # noinspection PyTypeChecker
+ sys.meta_path.insert(0, restricted_loader)
+ sys.path_importer_cache.clear()
+
+ try:
+ yield
+ finally:
+ if import_type == 'plugin' and not collection_loader:
+ from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
+ _AnsibleCollectionFinder._remove() # pylint: disable=protected-access
+
+ if sys.meta_path[0] != restricted_loader:
+ report_message(path, 0, 0, 'metapath', 'changes to sys.meta_path[0] are not permitted', messages)
+
+ while restricted_loader in sys.meta_path:
+ # noinspection PyTypeChecker
+ sys.meta_path.remove(restricted_loader)
+
+ sys.path_importer_cache.clear()
+
+ @contextlib.contextmanager
+ def monitor_sys_modules(path, messages):
+ """Monitor sys.modules for unwanted changes, reverting any additions made to our own namespaces."""
+ snapshot = sys.modules.copy()
+
+ try:
+ yield
+ finally:
+ check_sys_modules(path, snapshot, messages)
+
+ for key in set(sys.modules.keys()) - set(snapshot.keys()):
+ if is_name_in_namespace(key, ('ansible', 'ansible_collections')):
+ del sys.modules[key] # only unload our own code since we know it's native Python
+
+ @contextlib.contextmanager
+ def capture_output(capture):
+ """Capture sys.stdout and sys.stderr.
+ :type capture: Capture
+ """
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+
+ sys.stdout = capture.stdout
+ sys.stderr = capture.stderr
+
+ # clear all warnings registries to make all warnings available
+ for module in sys.modules.values():
+ try:
+ # noinspection PyUnresolvedReferences
+ module.__warningregistry__.clear()
+ except AttributeError:
+ pass
+
+ with warnings.catch_warnings():
+ warnings.simplefilter('error')
+
+ if collection_loader and import_type == 'plugin':
+ warnings.filterwarnings(
+ "ignore",
+ "AnsibleCollectionFinder has already been configured")
+
+ if sys.version_info[0] == 2:
+ warnings.filterwarnings(
+ "ignore",
+ "Python 2 is no longer supported by the Python core team. Support for it is now deprecated in cryptography,"
+ " and will be removed in a future release.")
+ warnings.filterwarnings(
+ "ignore",
+ "Python 2 is no longer supported by the Python core team. Support for it is now deprecated in cryptography,"
+ " and will be removed in the next release.")
+
+ if sys.version_info[:2] == (3, 5):
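+ # both spellings below are filtered intentionally: some cryptography releases
+ # emitted this warning with the missing space ("ofcryptography") before it was fixed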
+ warnings.filterwarnings(
+ "ignore",
+ "Python 3.5 support will be dropped in the next release ofcryptography. Please upgrade your Python.")
+ warnings.filterwarnings(
+ "ignore",
+ "Python 3.5 support will be dropped in the next release of cryptography. Please upgrade your Python.")
+
+ try:
+ yield
+ finally:
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+
+ run(import_type == 'module')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/target/setup/ConfigureRemotingForAnsible.ps1 b/test/lib/ansible_test/_util/target/setup/ConfigureRemotingForAnsible.ps1
new file mode 100644
index 0000000..7cc86ab
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/setup/ConfigureRemotingForAnsible.ps1
@@ -0,0 +1,435 @@
+#Requires -Version 3.0
+
+# Configure a Windows host for remote management with Ansible
+# -----------------------------------------------------------
+#
+# This script checks the current WinRM (PS Remoting) configuration and makes
+# the necessary changes to allow Ansible to connect, authenticate and
+# execute PowerShell commands.
+#
+# IMPORTANT: This script uses self-signed certificates and authentication mechanisms
+# that are intended for development environments and evaluation purposes only.
+# Production environments and deployments that are exposed on the network should
+# use CA-signed certificates and secure authentication mechanisms such as Kerberos.
+#
+# To run this script in PowerShell:
+#
+# [Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
+# $url = "https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1"
+# $file = "$env:temp\ConfigureRemotingForAnsible.ps1"
+#
+# (New-Object -TypeName System.Net.WebClient).DownloadFile($url, $file)
+#
+# powershell.exe -ExecutionPolicy ByPass -File $file
+#
+# All events are logged to the Windows EventLog, useful for unattended runs.
+#
+# Use option -Verbose in order to see the verbose output messages.
+#
+# Use option -CertValidityDays to specify how long this certificate is valid
+# starting from today. For example, specify -CertValidityDays 3650 to get
+# a certificate valid for 10 years.
+#
+# Use option -ForceNewSSLCert if the system has been SysPreped and a new
+# SSL Certificate must be forced on the WinRM Listener when re-running this
+# script. This is necessary when a new SID and CN name is created.
+#
+# Use option -EnableCredSSP to enable CredSSP as an authentication option.
+#
+# Use option -DisableBasicAuth to disable basic authentication.
+#
+# Use option -SkipNetworkProfileCheck to skip the network profile check.
+# Without specifying this the script will only run if the device's interfaces
+# are in DOMAIN or PRIVATE zones. Provide this switch if you want to enable
+# WinRM on a device with an interface in PUBLIC zone.
+#
+# Use option -SubjectName to specify the CN name of the certificate. This
+# defaults to the system's hostname and generally should not be specified.
+
+# Written by Trond Hindenes <trond@hindenes.com>
+# Updated by Chris Church <cchurch@ansible.com>
+# Updated by Michael Crilly <mike@autologic.cm>
+# Updated by Anton Ouzounov <Anton.Ouzounov@careerbuilder.com>
+# Updated by Nicolas Simond <contact@nicolas-simond.com>
+# Updated by Dag Wieërs <dag@wieers.com>
+# Updated by Jordan Borean <jborean93@gmail.com>
+# Updated by Erwan Quélin <erwan.quelin@gmail.com>
+# Updated by David Norman <david@dkn.email>
+#
+# Version 1.0 - 2014-07-06
+# Version 1.1 - 2014-11-11
+# Version 1.2 - 2015-05-15
+# Version 1.3 - 2016-04-04
+# Version 1.4 - 2017-01-05
+# Version 1.5 - 2017-02-09
+# Version 1.6 - 2017-04-18
+# Version 1.7 - 2017-11-23
+# Version 1.8 - 2018-02-23
+# Version 1.9 - 2018-09-21
+
+# Support -Verbose option
+[CmdletBinding()]
+
+Param (
+ [string]$SubjectName = $env:COMPUTERNAME,
+ [int]$CertValidityDays = 1095,
+ [switch]$SkipNetworkProfileCheck,
+ $CreateSelfSignedCert = $true,
+ [switch]$ForceNewSSLCert,
+ [switch]$GlobalHttpFirewallAccess,
+ [switch]$DisableBasicAuth = $false,
+ [switch]$EnableCredSSP
+)
+
+Function Write-ProgressLog {
+ $Message = $args[0]
+ Write-EventLog -LogName Application -Source $EventSource -EntryType Information -EventId 1 -Message $Message
+}
+
+Function Write-VerboseLog {
+ $Message = $args[0]
+ Write-Verbose $Message
+ Write-ProgressLog $Message
+}
+
+Function Write-HostLog {
+ $Message = $args[0]
+ Write-Output $Message
+ Write-ProgressLog $Message
+}
+
+Function New-LegacySelfSignedCert {
+ Param (
+ [string]$SubjectName,
+ [int]$ValidDays = 1095
+ )
+
+ $hostnonFQDN = $env:computerName
+ $hostFQDN = [System.Net.Dns]::GetHostByName(($env:computerName)).Hostname
+ $SignatureAlgorithm = "SHA256"
+
+ $name = New-Object -COM "X509Enrollment.CX500DistinguishedName.1"
+ $name.Encode("CN=$SubjectName", 0)
+
+ $key = New-Object -COM "X509Enrollment.CX509PrivateKey.1"
+ $key.ProviderName = "Microsoft Enhanced RSA and AES Cryptographic Provider"
+ $key.KeySpec = 1
+ $key.Length = 4096
+ $key.SecurityDescriptor = "D:PAI(A;;0xd01f01ff;;;SY)(A;;0xd01f01ff;;;BA)(A;;0x80120089;;;NS)"
+ $key.MachineContext = 1
+ $key.Create()
+
+ $serverauthoid = New-Object -COM "X509Enrollment.CObjectId.1"
+ $serverauthoid.InitializeFromValue("1.3.6.1.5.5.7.3.1")
+ $ekuoids = New-Object -COM "X509Enrollment.CObjectIds.1"
+ $ekuoids.Add($serverauthoid)
+ $ekuext = New-Object -COM "X509Enrollment.CX509ExtensionEnhancedKeyUsage.1"
+ $ekuext.InitializeEncode($ekuoids)
+
+ $cert = New-Object -COM "X509Enrollment.CX509CertificateRequestCertificate.1"
+ $cert.InitializeFromPrivateKey(2, $key, "")
+ $cert.Subject = $name
+ $cert.Issuer = $cert.Subject
+ $cert.NotBefore = (Get-Date).AddDays(-1)
+ $cert.NotAfter = $cert.NotBefore.AddDays($ValidDays)
+
+ $SigOID = New-Object -ComObject X509Enrollment.CObjectId
+ $SigOID.InitializeFromValue(([Security.Cryptography.Oid]$SignatureAlgorithm).Value)
+
+ [string[]] $AlternativeName += $hostnonFQDN
+ $AlternativeName += $hostFQDN
+ $IAlternativeNames = New-Object -ComObject X509Enrollment.CAlternativeNames
+
+ foreach ($AN in $AlternativeName) {
+ $AltName = New-Object -ComObject X509Enrollment.CAlternativeName
+ $AltName.InitializeFromString(0x3, $AN)
+ $IAlternativeNames.Add($AltName)
+ }
+
+ $SubjectAlternativeName = New-Object -ComObject X509Enrollment.CX509ExtensionAlternativeNames
+ $SubjectAlternativeName.InitializeEncode($IAlternativeNames)
+
+ [String[]]$KeyUsage = ("DigitalSignature", "KeyEncipherment")
+ $KeyUsageObj = New-Object -ComObject X509Enrollment.CX509ExtensionKeyUsage
+ $KeyUsageObj.InitializeEncode([int][Security.Cryptography.X509Certificates.X509KeyUsageFlags]($KeyUsage))
+ $KeyUsageObj.Critical = $true
+
+ $cert.X509Extensions.Add($KeyUsageObj)
+ $cert.X509Extensions.Add($ekuext)
+ $cert.SignatureInformation.HashAlgorithm = $SigOID
+ $cert.X509Extensions.Add($SubjectAlternativeName)
+ $cert.Encode()
+
+ $enrollment = New-Object -COM "X509Enrollment.CX509Enrollment.1"
+ $enrollment.InitializeFromRequest($cert)
+ $certdata = $enrollment.CreateRequest(0)
+ $enrollment.InstallResponse(2, $certdata, 0, "")
+
+ # extract/return the thumbprint from the generated cert
+ $parsed_cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2
+ $parsed_cert.Import([System.Text.Encoding]::UTF8.GetBytes($certdata))
+
+ return $parsed_cert.Thumbprint
+}
+
+Function Enable-GlobalHttpFirewallAccess {
+ Write-Verbose "Forcing global HTTP firewall access"
+ # this is a fairly naive implementation; could be more sophisticated about rule matching/collapsing
+ $fw = New-Object -ComObject HNetCfg.FWPolicy2
+
+ # try to find/enable the default rule first
+ $add_rule = $false
+ $matching_rules = $fw.Rules | Where-Object { $_.Name -eq "Windows Remote Management (HTTP-In)" }
+ $rule = $null
+ If ($matching_rules) {
+ If ($matching_rules -isnot [Array]) {
+ Write-Verbose "Editing existing single HTTP firewall rule"
+ $rule = $matching_rules
+ }
+ Else {
+ # try to find one with the All or Public profile first
+ Write-Verbose "Found multiple existing HTTP firewall rules..."
+ $rule = $matching_rules | ForEach-Object { $_.Profiles -band 4 }[0]
+
+ If (-not $rule -or $rule -is [Array]) {
+ Write-Verbose "Editing an arbitrary single HTTP firewall rule (multiple existed)"
+ # oh well, just pick the first one
+ $rule = $matching_rules[0]
+ }
+ }
+ }
+
+ If (-not $rule) {
+ Write-Verbose "Creating a new HTTP firewall rule"
+ $rule = New-Object -ComObject HNetCfg.FWRule
+ $rule.Name = "Windows Remote Management (HTTP-In)"
+ $rule.Description = "Inbound rule for Windows Remote Management via WS-Management. [TCP 5985]"
+ $add_rule = $true
+ }
+
+ $rule.Profiles = 0x7FFFFFFF
+ $rule.Protocol = 6
+ $rule.LocalPorts = 5985
+ $rule.RemotePorts = "*"
+ $rule.LocalAddresses = "*"
+ $rule.RemoteAddresses = "*"
+ $rule.Enabled = $true
+ $rule.Direction = 1
+ $rule.Action = 1
+ $rule.Grouping = "Windows Remote Management"
+
+ If ($add_rule) {
+ $fw.Rules.Add($rule)
+ }
+
+ Write-Verbose "HTTP firewall rule $($rule.Name) updated"
+}
+
+# Set up error handling.
+Trap {
+ $_
+ Exit 1
+}
+$ErrorActionPreference = "Stop"
+
+# Get the ID and security principal of the current user account
+$myWindowsID = [System.Security.Principal.WindowsIdentity]::GetCurrent()
+$myWindowsPrincipal = new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+# Get the security principal for the Administrator role
+$adminRole = [System.Security.Principal.WindowsBuiltInRole]::Administrator
+
+# Check to see if we are currently running "as Administrator"
+if (-Not $myWindowsPrincipal.IsInRole($adminRole)) {
+ Write-Output "ERROR: You need elevated Administrator privileges in order to run this script."
+ Write-Output " Start Windows PowerShell by using the Run as Administrator option."
+ Exit 2
+}
+
+$EventSource = $MyInvocation.MyCommand.Name
+If (-Not $EventSource) {
+ $EventSource = "Powershell CLI"
+}
+
+If ([System.Diagnostics.EventLog]::Exists('Application') -eq $False -or [System.Diagnostics.EventLog]::SourceExists($EventSource) -eq $False) {
+ New-EventLog -LogName Application -Source $EventSource
+}
+
+# Detect PowerShell version.
+If ($PSVersionTable.PSVersion.Major -lt 3) {
+ Write-ProgressLog "PowerShell version 3 or higher is required."
+ Throw "PowerShell version 3 or higher is required."
+}
+
+# Find and start the WinRM service.
+Write-Verbose "Verifying WinRM service."
+If (!(Get-Service "WinRM")) {
+ Write-ProgressLog "Unable to find the WinRM service."
+ Throw "Unable to find the WinRM service."
+}
+ElseIf ((Get-Service "WinRM").Status -ne "Running") {
+ Write-Verbose "Setting WinRM service to start automatically on boot."
+ Set-Service -Name "WinRM" -StartupType Automatic
+ Write-ProgressLog "Set WinRM service to start automatically on boot."
+ Write-Verbose "Starting WinRM service."
+ Start-Service -Name "WinRM" -ErrorAction Stop
+ Write-ProgressLog "Started WinRM service."
+}
+
+# WinRM should be running; check that we have a PS session config.
+If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener))) {
+ If ($SkipNetworkProfileCheck) {
+ Write-Verbose "Enabling PS Remoting without checking Network profile."
+ Enable-PSRemoting -SkipNetworkProfileCheck -Force -ErrorAction Stop
+ Write-ProgressLog "Enabled PS Remoting without checking Network profile."
+ }
+ Else {
+ Write-Verbose "Enabling PS Remoting."
+ Enable-PSRemoting -Force -ErrorAction Stop
+ Write-ProgressLog "Enabled PS Remoting."
+ }
+}
+Else {
+ Write-Verbose "PS Remoting is already enabled."
+}
+
+# Ensure LocalAccountTokenFilterPolicy is set to 1
+# https://github.com/ansible/ansible/issues/42978
+$token_path = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System"
+$token_prop_name = "LocalAccountTokenFilterPolicy"
+$token_key = Get-Item -Path $token_path
+$token_value = $token_key.GetValue($token_prop_name, $null)
+if ($token_value -ne 1) {
+ Write-Verbose "Setting LocalAccountTOkenFilterPolicy to 1"
+ if ($null -ne $token_value) {
+ Remove-ItemProperty -Path $token_path -Name $token_prop_name
+ }
+ New-ItemProperty -Path $token_path -Name $token_prop_name -Value 1 -PropertyType DWORD > $null
+}
+
+# Make sure there is a SSL listener.
+$listeners = Get-ChildItem WSMan:\localhost\Listener
+If (!($listeners | Where-Object { $_.Keys -like "TRANSPORT=HTTPS" })) {
+ # We cannot use New-SelfSignedCertificate on 2012R2 and earlier
+ $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays
+ Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint"
+
+ # Create the hashtables of settings to be used.
+ $valueset = @{
+ Hostname = $SubjectName
+ CertificateThumbprint = $thumbprint
+ }
+
+ $selectorset = @{
+ Transport = "HTTPS"
+ Address = "*"
+ }
+
+ Write-Verbose "Enabling SSL listener."
+ New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset
+ Write-ProgressLog "Enabled SSL listener."
+}
+Else {
+ Write-Verbose "SSL listener is already active."
+
+ # Force a new SSL cert on the listener if $ForceNewSSLCert is set
+ If ($ForceNewSSLCert) {
+
+ # We cannot use New-SelfSignedCertificate on 2012R2 and earlier
+ $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays
+ Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint"
+
+ $valueset = @{
+ CertificateThumbprint = $thumbprint
+ Hostname = $SubjectName
+ }
+
+ # Delete the listener for SSL
+ $selectorset = @{
+ Address = "*"
+ Transport = "HTTPS"
+ }
+ Remove-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset
+
+ # Add new Listener with new SSL cert
+ New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset
+ }
+}
+
+# Check for basic authentication.
+$basicAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object { $_.Name -eq "Basic" }
+
+If ($DisableBasicAuth) {
+ If (($basicAuthSetting.Value) -eq $true) {
+ Write-Verbose "Disabling basic auth support."
+ Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $false
+ Write-ProgressLog "Disabled basic auth support."
+ }
+ Else {
+ Write-Verbose "Basic auth is already disabled."
+ }
+}
+Else {
+ If (($basicAuthSetting.Value) -eq $false) {
+ Write-Verbose "Enabling basic auth support."
+ Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $true
+ Write-ProgressLog "Enabled basic auth support."
+ }
+ Else {
+ Write-Verbose "Basic auth is already enabled."
+ }
+}
+
+# If EnableCredSSP is set to true
+If ($EnableCredSSP) {
+ # Check for CredSSP authentication
+ $credsspAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object { $_.Name -eq "CredSSP" }
+ If (($credsspAuthSetting.Value) -eq $false) {
+ Write-Verbose "Enabling CredSSP auth support."
+ Enable-WSManCredSSP -role server -Force
+ Write-ProgressLog "Enabled CredSSP auth support."
+ }
+}
+
+If ($GlobalHttpFirewallAccess) {
+ Enable-GlobalHttpFirewallAccess
+}
+
+# Configure firewall to allow WinRM HTTPS connections.
+$fwtest1 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS"
+$fwtest2 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" profile=any
+If ($fwtest1.count -lt 5) {
+ Write-Verbose "Adding firewall rule to allow WinRM HTTPS."
+ netsh advfirewall firewall add rule profile=any name="Allow WinRM HTTPS" dir=in localport=5986 protocol=TCP action=allow
+ Write-ProgressLog "Added firewall rule to allow WinRM HTTPS."
+}
+ElseIf (($fwtest1.count -ge 5) -and ($fwtest2.count -lt 5)) {
+ Write-Verbose "Updating firewall rule to allow WinRM HTTPS for any profile."
+ netsh advfirewall firewall set rule name="Allow WinRM HTTPS" new profile=any
+ Write-ProgressLog "Updated firewall rule to allow WinRM HTTPS for any profile."
+}
+Else {
+ Write-Verbose "Firewall rule already exists to allow WinRM HTTPS."
+}
+
+# Test a remoting connection to localhost, which should work.
+$httpResult = Invoke-Command -ComputerName "localhost" -ScriptBlock { $using:env:COMPUTERNAME } -ErrorVariable httpError -ErrorAction SilentlyContinue
+$httpsOptions = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck
+
+$httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $httpsOptions -ErrorVariable httpsError -ErrorAction SilentlyContinue
+
+If ($httpResult -and $httpsResult) {
+ Write-Verbose "HTTP: Enabled | HTTPS: Enabled"
+}
+ElseIf ($httpsResult -and !$httpResult) {
+ Write-Verbose "HTTP: Disabled | HTTPS: Enabled"
+}
+ElseIf ($httpResult -and !$httpsResult) {
+ Write-Verbose "HTTP: Enabled | HTTPS: Disabled"
+}
+Else {
+ Write-ProgressLog "Unable to establish an HTTP or HTTPS remoting session."
+ Throw "Unable to establish an HTTP or HTTPS remoting session."
+}
+Write-VerboseLog "PS Remoting has been successfully configured for Ansible."
diff --git a/test/lib/ansible_test/_util/target/setup/bootstrap.sh b/test/lib/ansible_test/_util/target/setup/bootstrap.sh
new file mode 100644
index 0000000..732c122
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/setup/bootstrap.sh
@@ -0,0 +1,450 @@
+# shellcheck shell=sh
+
+set -eu
+
+install_ssh_keys()
+{
+ if [ ! -f "${ssh_private_key_path}" ]; then
+ # write public/private ssh key pair
+ public_key_path="${ssh_private_key_path}.pub"
+
+ # shellcheck disable=SC2174
+ mkdir -m 0700 -p "${ssh_path}"
+ touch "${public_key_path}" "${ssh_private_key_path}"
+ chmod 0600 "${public_key_path}" "${ssh_private_key_path}"
+ echo "${ssh_public_key}" > "${public_key_path}"
+ echo "${ssh_private_key}" > "${ssh_private_key_path}"
+
+ # add public key to authorized_keys
+ authorized_keys_path="${HOME}/.ssh/authorized_keys"
+
+ # the existing file is overwritten to avoid conflicts (e.g. RHEL on EC2 blocks root login)
+ cat "${public_key_path}" > "${authorized_keys_path}"
+ chmod 0600 "${authorized_keys_path}"
+
+ # add localhost's server keys to known_hosts
+ known_hosts_path="${HOME}/.ssh/known_hosts"
+
+ for key in /etc/ssh/ssh_host_*_key.pub; do
+ echo "localhost $(cat "${key}")" >> "${known_hosts_path}"
+ done
+ fi
+}
+
+customize_bashrc()
+{
+ true > ~/.bashrc
+
+ # Show color `ls` results when available.
+ if ls --color > /dev/null 2>&1; then
+ echo "alias ls='ls --color'" >> ~/.bashrc
+ elif ls -G > /dev/null 2>&1; then
+ echo "alias ls='ls -G'" >> ~/.bashrc
+ fi
+
+ # Improve shell prompts for interactive use.
+ echo "export PS1='\[\e]0;\u@\h: \w\a\]\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '" >> ~/.bashrc
+}
+
+install_pip() {
+ if ! "${python_interpreter}" -m pip.__main__ --version --disable-pip-version-check 2>/dev/null; then
+ case "${python_version}" in
+ "2.7")
+ pip_bootstrap_url="https://ci-files.testing.ansible.com/ansible-test/get-pip-20.3.4.py"
+ ;;
+ *)
+ pip_bootstrap_url="https://ci-files.testing.ansible.com/ansible-test/get-pip-21.3.1.py"
+ ;;
+ esac
+
+ while true; do
+ curl --silent --show-error "${pip_bootstrap_url}" -o /tmp/get-pip.py && \
+ "${python_interpreter}" /tmp/get-pip.py --disable-pip-version-check --quiet && \
+ rm /tmp/get-pip.py \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+ fi
+}
+
+pip_install() {
+ pip_packages="$1"
+
+ while true; do
+ # shellcheck disable=SC2086
+ "${python_interpreter}" -m pip install --disable-pip-version-check ${pip_packages} \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+}
+
+bootstrap_remote_alpine()
+{
+ py_pkg_prefix="py3"
+
+ packages="
+ acl
+ bash
+ gcc
+ python3-dev
+ ${py_pkg_prefix}-pip
+ sudo
+ "
+
+ if [ "${controller}" ]; then
+ packages="
+ ${packages}
+ ${py_pkg_prefix}-cryptography
+ ${py_pkg_prefix}-packaging
+ ${py_pkg_prefix}-yaml
+ ${py_pkg_prefix}-jinja2
+ ${py_pkg_prefix}-resolvelib
+ "
+ fi
+
+ while true; do
+ # shellcheck disable=SC2086
+ apk add -q ${packages} \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+}
+
+bootstrap_remote_fedora()
+{
+ py_pkg_prefix="python3"
+
+ packages="
+ acl
+ gcc
+ ${py_pkg_prefix}-devel
+ "
+
+ if [ "${controller}" ]; then
+ packages="
+ ${packages}
+ ${py_pkg_prefix}-cryptography
+ ${py_pkg_prefix}-jinja2
+ ${py_pkg_prefix}-packaging
+ ${py_pkg_prefix}-pyyaml
+ ${py_pkg_prefix}-resolvelib
+ "
+ fi
+
+ while true; do
+ # shellcheck disable=SC2086
+ dnf install -q -y ${packages} \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+}
+
+bootstrap_remote_freebsd()
+{
+ packages="
+ python${python_package_version}
+ py${python_package_version}-sqlite3
+ bash
+ curl
+ gtar
+ sudo
+ "
+
+ if [ "${controller}" ]; then
+ jinja2_pkg="py${python_package_version}-jinja2"
+ cryptography_pkg="py${python_package_version}-cryptography"
+ pyyaml_pkg="py${python_package_version}-yaml"
+
+ # Declare platform/python version combinations which do not have supporting OS packages available.
+ # For these combinations ansible-test will use pip to install the requirements instead.
+ case "${platform_version}/${python_version}" in
+ *)
+ jinja2_pkg="" # not available
+ cryptography_pkg="" # not available
+ pyyaml_pkg="" # not available
+ ;;
+ esac
+
+ packages="
+ ${packages}
+ libyaml
+ ${pyyaml_pkg}
+ ${jinja2_pkg}
+ ${cryptography_pkg}
+ "
+ fi
+
+ while true; do
+ # shellcheck disable=SC2086
+ env ASSUME_ALWAYS_YES=YES pkg bootstrap && \
+ pkg install -q -y ${packages} \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+
+ install_pip
+
+ if ! grep '^PermitRootLogin yes$' /etc/ssh/sshd_config > /dev/null; then
+ sed -i '' 's/^# *PermitRootLogin.*$/PermitRootLogin yes/;' /etc/ssh/sshd_config
+ service sshd restart
+ fi
+
+ # make additional wheels available for packages which lack them for this platform
+ echo "# generated by ansible-test
+[global]
+extra-index-url = https://spare-tire.testing.ansible.com/simple/
+prefer-binary = yes
+" > /etc/pip.conf
+
+ # enable ACL support on the root filesystem (required for become between unprivileged users)
+ fs_path="/"
+ fs_device="$(mount -v "${fs_path}" | cut -w -f 1)"
+ # shellcheck disable=SC2001
+ fs_device_escaped=$(echo "${fs_device}" | sed 's|/|\\/|g')
+
+ mount -o acls "${fs_device}" "${fs_path}"
+ awk 'BEGIN{FS=" "}; /'"${fs_device_escaped}"'/ {gsub(/^rw$/,"rw,acls", $4); print; next} // {print}' /etc/fstab > /etc/fstab.new
+ mv /etc/fstab.new /etc/fstab
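+
+ # illustrative: an fstab entry such as '/dev/gpt/rootfs / ufs rw 1 1'
+ # becomes '/dev/gpt/rootfs / ufs rw,acls 1 1' after the awk rewrite above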
+
+ # enable sudo without a password for the wheel group, allowing ansible to use the sudo become plugin
+ echo '%wheel ALL=(ALL:ALL) NOPASSWD: ALL' > /usr/local/etc/sudoers.d/ansible-test
+}
+
+bootstrap_remote_macos()
+{
+ # Silence macOS deprecation warning for bash.
+ echo "export BASH_SILENCE_DEPRECATION_WARNING=1" >> ~/.bashrc
+
+ # Make sure ~/ansible/ is the starting directory for interactive shells on the control node.
+ # The root home directory is under a symlink. Without this the real path will be displayed instead.
+ if [ "${controller}" ]; then
+ echo "cd ~/ansible/" >> ~/.bashrc
+ fi
+
+ # Make sure commands like 'brew' can be found.
+ # This affects users with the 'zsh' shell, as well as 'root' accessed using 'sudo' from a user with 'zsh' for a shell.
+ # shellcheck disable=SC2016
+ echo 'PATH="/usr/local/bin:$PATH"' > /etc/zshenv
+}
+
+bootstrap_remote_rhel_7()
+{
+ packages="
+ gcc
+ python-devel
+ python-virtualenv
+ "
+
+ while true; do
+ # shellcheck disable=SC2086
+ yum install -q -y ${packages} \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+
+ install_pip
+
+ bootstrap_remote_rhel_pinned_pip_packages
+}
+
+bootstrap_remote_rhel_8()
+{
+ if [ "${python_version}" = "3.6" ]; then
+ py_pkg_prefix="python3"
+ else
+ py_pkg_prefix="python${python_package_version}"
+ fi
+
+ packages="
+ gcc
+ ${py_pkg_prefix}-devel
+ "
+
+ # Jinja2 is not installed with an OS package since the provided version is too old.
+ # Instead, ansible-test will install it using pip.
+ if [ "${controller}" ]; then
+ packages="
+ ${packages}
+ ${py_pkg_prefix}-cryptography
+ "
+ fi
+
+ while true; do
+ # shellcheck disable=SC2086
+ yum module install -q -y "python${python_package_version}" && \
+ yum install -q -y ${packages} \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+
+ bootstrap_remote_rhel_pinned_pip_packages
+}
+
+bootstrap_remote_rhel_9()
+{
+ py_pkg_prefix="python3"
+
+ packages="
+ gcc
+ ${py_pkg_prefix}-devel
+ "
+
+ # Jinja2 is not installed with an OS package since the provided version is too old.
+ # Instead, ansible-test will install it using pip.
+ if [ "${controller}" ]; then
+ packages="
+ ${packages}
+ ${py_pkg_prefix}-cryptography
+ ${py_pkg_prefix}-packaging
+ ${py_pkg_prefix}-pyyaml
+ ${py_pkg_prefix}-resolvelib
+ "
+ fi
+
+ while true; do
+ # shellcheck disable=SC2086
+ dnf install -q -y ${packages} \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+}
+
+bootstrap_remote_rhel()
+{
+ case "${platform_version}" in
+ 7.*) bootstrap_remote_rhel_7 ;;
+ 8.*) bootstrap_remote_rhel_8 ;;
+ 9.*) bootstrap_remote_rhel_9 ;;
+ esac
+}
+
+bootstrap_remote_rhel_pinned_pip_packages()
+{
+ # pin packaging and pyparsing to match the downstream vendored versions
+ pip_packages="
+ packaging==20.4
+ pyparsing==2.4.7
+ "
+
+ pip_install "${pip_packages}"
+}
+
+bootstrap_remote_ubuntu()
+{
+ py_pkg_prefix="python3"
+
+ packages="
+ acl
+ gcc
+ python${python_version}-dev
+ python3-pip
+ python${python_version}-venv
+ "
+
+ if [ "${controller}" ]; then
+ cryptography_pkg="${py_pkg_prefix}-cryptography"
+ jinja2_pkg="${py_pkg_prefix}-jinja2"
+ packaging_pkg="${py_pkg_prefix}-packaging"
+ pyyaml_pkg="${py_pkg_prefix}-yaml"
+ resolvelib_pkg="${py_pkg_prefix}-resolvelib"
+
+ # Declare platforms which do not have supporting OS packages available.
+ # For these ansible-test will use pip to install the requirements instead.
+ # Only the platform is checked since Ubuntu shares Python packages across Python versions.
+ case "${platform_version}" in
+ "20.04")
+ jinja2_pkg="" # too old
+ resolvelib_pkg="" # not available
+ ;;
+ esac
+
+ packages="
+ ${packages}
+ ${cryptography_pkg}
+ ${jinja2_pkg}
+ ${packaging_pkg}
+ ${pyyaml_pkg}
+ ${resolvelib_pkg}
+ "
+ fi
+
+ while true; do
+ # shellcheck disable=SC2086
+ apt-get update -qq -y && \
+ DEBIAN_FRONTEND=noninteractive apt-get install -qq -y --no-install-recommends ${packages} \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+
+ if [ "${controller}" ]; then
+ if [ "${platform_version}/${python_version}" = "20.04/3.9" ]; then
+ # Install pyyaml using pip so libyaml support is available on Python 3.9.
+ # The OS package install (which is installed by default) only has a .so file for Python 3.8.
+ pip_install "--upgrade pyyaml"
+ fi
+ fi
+}
+
+bootstrap_docker()
+{
+ # Required for newer mysql-server packages to install/upgrade on Ubuntu 16.04.
+ rm -f /usr/sbin/policy-rc.d
+}
+
+bootstrap_remote()
+{
+ for python_version in ${python_versions}; do
+ echo "Bootstrapping Python ${python_version}"
+
+ python_interpreter="python${python_version}"
+ python_package_version="$(echo "${python_version}" | tr -d '.')"
+
+ case "${platform}" in
+ "alpine") bootstrap_remote_alpine ;;
+ "fedora") bootstrap_remote_fedora ;;
+ "freebsd") bootstrap_remote_freebsd ;;
+ "macos") bootstrap_remote_macos ;;
+ "rhel") bootstrap_remote_rhel ;;
+ "ubuntu") bootstrap_remote_ubuntu ;;
+ esac
+ done
+}
+
+bootstrap()
+{
+ ssh_path="${HOME}/.ssh"
+ ssh_private_key_path="${ssh_path}/id_${ssh_key_type}"
+
+ install_ssh_keys
+ customize_bashrc
+
+ # allow tests to detect ansible-test bootstrapped instances, as well as the bootstrap type
+ echo "${bootstrap_type}" > /etc/ansible-test.bootstrap
+
+ case "${bootstrap_type}" in
+ "docker") bootstrap_docker ;;
+ "remote") bootstrap_remote ;;
+ esac
+}
+
+# These variables will be templated before sending the script to the host.
+# They are at the end of the script to maintain line numbers for debugging purposes.
+bootstrap_type=#{bootstrap_type}
+controller=#{controller}
+platform=#{platform}
+platform_version=#{platform_version}
+python_versions=#{python_versions}
+ssh_key_type=#{ssh_key_type}
+ssh_private_key=#{ssh_private_key}
+ssh_public_key=#{ssh_public_key}
+
+bootstrap
diff --git a/test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh b/test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh
new file mode 100644
index 0000000..3b05a3f
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/setup/check_systemd_cgroup_v1.sh
@@ -0,0 +1,17 @@
+# shellcheck shell=sh
+
+set -eu
+
+>&2 echo "@MARKER@"
+
+cgroup_path="$(awk -F: '$2 ~ /^name=systemd$/ { print "/sys/fs/cgroup/systemd"$3 }' /proc/1/cgroup)"
+
+if [ "${cgroup_path}" ] && [ -d "${cgroup_path}" ]; then
+ probe_path="${cgroup_path%/}/ansible-test-probe-@LABEL@"
+ mkdir "${probe_path}"
+ rmdir "${probe_path}"
+ exit 0
+fi
+
+>&2 echo "No systemd cgroup v1 hierarchy found"
+exit 1
diff --git a/test/lib/ansible_test/_util/target/setup/probe_cgroups.py b/test/lib/ansible_test/_util/target/setup/probe_cgroups.py
new file mode 100644
index 0000000..2ac7ecb
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/setup/probe_cgroups.py
@@ -0,0 +1,31 @@
+"""A tool for probing cgroups to determine write access."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import sys
+
+
+def main(): # type: () -> None
+ """Main program entry point."""
+ probe_dir = sys.argv[1]
+ paths = sys.argv[2:]
+ results = {}
+
+ for path in paths:
+ probe_path = os.path.join(path, probe_dir)
+
+ try:
+ os.mkdir(probe_path)
+ os.rmdir(probe_path)
+ except Exception as ex: # pylint: disable=broad-except
+ results[path] = str(ex)
+ else:
+ results[path] = None
+
+ print(json.dumps(results, sort_keys=True))
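+
+# Illustrative invocation (hypothetical paths):
+#   $ python probe_cgroups.py probe-ansible-test /sys/fs/cgroup/systemd
+#   {"/sys/fs/cgroup/systemd": null}
+# A null value means the path was writable; otherwise the error string is reported.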
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_util/target/setup/quiet_pip.py b/test/lib/ansible_test/_util/target/setup/quiet_pip.py
new file mode 100644
index 0000000..54f0f86
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/setup/quiet_pip.py
@@ -0,0 +1,72 @@
+"""Custom entry-point for pip that filters out unwanted logging and warnings."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import os
+import re
+import runpy
+import sys
+import warnings
+
+BUILTIN_FILTERER_FILTER = logging.Filterer.filter
+
+LOGGING_MESSAGE_FILTER = re.compile("^("
+ ".*Running pip install with root privileges is generally not a good idea.*|" # custom Fedora patch [1]
+ ".*Running pip as the 'root' user can result in broken permissions .*|" # pip 21.1
+ "DEPRECATION: Python 2.7 will reach the end of its life .*|" # pip 19.2.3
+ "Ignoring .*: markers .* don't match your environment|"
+ "Looking in indexes: .*|" # pypi-test-container
+ "Requirement already satisfied.*"
+ ")$")
+
+# [1] https://src.fedoraproject.org/rpms/python-pip/blob/f34/f/emit-a-warning-when-running-with-root-privileges.patch
+
+WARNING_MESSAGE_FILTERS = (
+ # DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained.
+ # pip 21.0 will drop support for Python 2.7 in January 2021.
+ # More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support
+ 'DEPRECATION: Python 2.7 reached the end of its life ',
+
+ # DEPRECATION: Python 3.5 reached the end of its life on September 13th, 2020. Please upgrade your Python as Python 3.5 is no longer maintained.
+ # pip 21.0 will drop support for Python 3.5 in January 2021. pip 21.0 will remove support for this functionality.
+ 'DEPRECATION: Python 3.5 reached the end of its life ',
+)
+
+
+def custom_filterer_filter(self, record):
+ """Globally omit logging of unwanted messages."""
+ if LOGGING_MESSAGE_FILTER.search(record.getMessage()):
+ return 0
+
+ return BUILTIN_FILTERER_FILTER(self, record)
+
+
+def main():
+ """Main program entry point."""
+ # Filtering logging output globally avoids having to intercept stdout/stderr.
+ # It also avoids problems with loss of color output and mixing up the order of stdout/stderr messages.
+ logging.Filterer.filter = custom_filterer_filter
+
+ for message_filter in WARNING_MESSAGE_FILTERS:
+ # Setting filterwarnings in code is necessary because of the following:
+ # Python 2.7 cannot use the -W option to match warning text after a colon. This makes it impossible to match specific warning messages.
+ warnings.filterwarnings('ignore', message_filter)
+
+ get_pip = os.environ.get('GET_PIP')
+
+ try:
+ if get_pip:
+ directory, filename = os.path.split(get_pip)
+ module = os.path.splitext(filename)[0]
+ sys.path.insert(0, directory)
+ runpy.run_module(module, run_name='__main__', alter_sys=True)
+ else:
+ runpy.run_module('pip.__main__', run_name='__main__', alter_sys=True)
+ except ImportError as ex:
+ print('pip is unavailable: %s' % ex)
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
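
The technique above patches logging.Filterer.filter process-wide, so every logger (including pip's) drops matching records without intercepting stdout/stderr. A self-contained sketch of the same idea, using an assumed pattern:

    """Minimal demonstration of suppressing log records by patching logging.Filterer.filter."""
    import logging
    import re

    ORIGINAL_FILTER = logging.Filterer.filter
    NOISE = re.compile('^Requirement already satisfied')  # assumed pattern for illustration

    def quiet_filter(self, record):
        # Drop matching records globally; defer everything else to the built-in filter.
        if NOISE.search(record.getMessage()):
            return 0
        return ORIGINAL_FILTER(self, record)

    logging.Filterer.filter = quiet_filter

    logging.basicConfig(level=logging.INFO)
    logging.info('Requirement already satisfied: pip')   # suppressed
    logging.info('Installing collected packages: pip')   # shown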
diff --git a/test/lib/ansible_test/_util/target/setup/requirements.py b/test/lib/ansible_test/_util/target/setup/requirements.py
new file mode 100644
index 0000000..4fe9a6c
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/setup/requirements.py
@@ -0,0 +1,337 @@
+"""A tool for installing test requirements on the controller and target host."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# pylint: disable=wrong-import-position
+
+import resource
+
+# Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 2.x when close_fds=True.
+# This will affect all Python subprocesses. It will also affect the current Python process if set before subprocess is imported for the first time.
+SOFT_RLIMIT_NOFILE = 1024
+
+CURRENT_RLIMIT_NOFILE = resource.getrlimit(resource.RLIMIT_NOFILE)
+DESIRED_RLIMIT_NOFILE = (SOFT_RLIMIT_NOFILE, CURRENT_RLIMIT_NOFILE[1])
+
+if DESIRED_RLIMIT_NOFILE < CURRENT_RLIMIT_NOFILE:
+ resource.setrlimit(resource.RLIMIT_NOFILE, DESIRED_RLIMIT_NOFILE)
+ CURRENT_RLIMIT_NOFILE = DESIRED_RLIMIT_NOFILE
+
+import base64
+import contextlib
+import errno
+import io
+import json
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+try:
+ import typing as t
+except ImportError:
+ t = None
+
+try:
+ from shlex import quote as cmd_quote
+except ImportError:
+ # noinspection PyProtectedMember
+ from pipes import quote as cmd_quote
+
+try:
+ from urllib.request import urlopen
+except ImportError:
+ # noinspection PyCompatibility,PyUnresolvedReferences
+ from urllib2 import urlopen # pylint: disable=ansible-bad-import-from
+
+ENCODING = 'utf-8'
+
+Text = type(u'')
+
+VERBOSITY = 0
+CONSOLE = sys.stderr
+
+
+def main(): # type: () -> None
+ """Main program entry point."""
+ global VERBOSITY # pylint: disable=global-statement
+
+ payload = json.loads(to_text(base64.b64decode(PAYLOAD)))
+
+ VERBOSITY = payload['verbosity']
+
+ script = payload['script']
+ commands = payload['commands']
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-test-', suffix='-pip.py') as pip:
+ pip.write(to_bytes(script))
+ pip.flush()
+
+ for name, options in commands:
+ try:
+ globals()[name](pip.name, options)
+ except ApplicationError as ex:
+ print(ex)
+ sys.exit(1)
+
+
+# noinspection PyUnusedLocal
+def bootstrap(pip, options): # type: (str, t.Dict[str, t.Any]) -> None
+ """Bootstrap pip and related packages in an empty virtual environment."""
+ pip_version = options['pip_version']
+ packages = options['packages']
+
+ url = 'https://ci-files.testing.ansible.com/ansible-test/get-pip-%s.py' % pip_version
+ cache_path = os.path.expanduser('~/.ansible/test/cache/get_pip_%s.py' % pip_version.replace(".", "_"))
+ temp_path = cache_path + '.download'
+
+ if os.path.exists(cache_path):
+ log('Using cached pip %s bootstrap script: %s' % (pip_version, cache_path))
+ else:
+ log('Downloading pip %s bootstrap script: %s' % (pip_version, url))
+
+ make_dirs(os.path.dirname(cache_path))
+
+ try:
+ download_file(url, temp_path)
+ except Exception as ex:
+ raise ApplicationError(('''
+Download failed: %s
+
+The bootstrap script can be manually downloaded and saved to: %s
+
+If you're behind a proxy, consider commenting on the following GitHub issue:
+
+https://github.com/ansible/ansible/issues/77304
+''' % (ex, cache_path)).strip())
+
+ shutil.move(temp_path, cache_path)
+
+ log('Cached pip %s bootstrap script: %s' % (pip_version, cache_path))
+
+ env = common_pip_environment()
+ env.update(GET_PIP=cache_path)
+
+ options = common_pip_options()
+ options.extend(packages)
+
+ command = [sys.executable, pip] + options
+
+ execute_command(command, env=env)
+
+
+def install(pip, options): # type: (str, t.Dict[str, t.Any]) -> None
+ """Perform a pip install."""
+ requirements = options['requirements']
+ constraints = options['constraints']
+ packages = options['packages']
+
+ tempdir = tempfile.mkdtemp(prefix='ansible-test-', suffix='-requirements')
+
+ try:
+ options = common_pip_options()
+ options.extend(packages)
+
+ for path, content in requirements:
+ write_text_file(os.path.join(tempdir, path), content, True)
+ options.extend(['-r', path])
+
+ for path, content in constraints:
+ write_text_file(os.path.join(tempdir, path), content, True)
+ options.extend(['-c', path])
+
+ command = [sys.executable, pip, 'install'] + options
+
+ env = common_pip_environment()
+
+ execute_command(command, env=env, cwd=tempdir)
+ finally:
+ remove_tree(tempdir)
+
+
+def uninstall(pip, options): # type: (str, t.Dict[str, t.Any]) -> None
+ """Perform a pip uninstall."""
+ packages = options['packages']
+ ignore_errors = options['ignore_errors']
+
+ options = common_pip_options()
+ options.extend(packages)
+
+ command = [sys.executable, pip, 'uninstall', '-y'] + options
+
+ env = common_pip_environment()
+
+ try:
+ execute_command(command, env=env, capture=True)
+ except SubprocessError:
+ if not ignore_errors:
+ raise
+
+
+# noinspection PyUnusedLocal
+def version(pip, options): # type: (str, t.Dict[str, t.Any]) -> None
+ """Report the pip version."""
+ del options
+
+ options = common_pip_options()
+
+ command = [sys.executable, pip, '-V'] + options
+
+ env = common_pip_environment()
+
+ execute_command(command, env=env, capture=True)
+
+
+def common_pip_environment(): # type: () -> t.Dict[str, str]
+ """Return common environment variables used to run pip."""
+ env = os.environ.copy()
+
+ return env
+
+
+def common_pip_options(): # type: () -> t.List[str]
+ """Return a list of common pip options."""
+ return [
+ '--disable-pip-version-check',
+ ]
+
+
+def devnull(): # type: () -> t.IO[bytes]
+ """Return a file object that references devnull."""
+ try:
+ return devnull.file
+ except AttributeError:
+ devnull.file = open(os.devnull, 'w+b') # pylint: disable=consider-using-with
+
+ return devnull.file
+
+
+def download_file(url, path): # type: (str, str) -> None
+ """Download the given URL to the specified file path."""
+ with open(to_bytes(path), 'wb') as saved_file:
+ with contextlib.closing(urlopen(url)) as download:
+ shutil.copyfileobj(download, saved_file)
+
+
+class ApplicationError(Exception):
+ """Base class for application exceptions."""
+
+
+class SubprocessError(ApplicationError):
+ """A command returned a non-zero status."""
+ def __init__(self, cmd, status, stdout, stderr): # type: (t.List[str], int, str, str) -> None
+ message = 'A command failed with status %d: %s' % (status, ' '.join(cmd_quote(c) for c in cmd))
+
+ if stderr:
+ message += '\n>>> Standard Error\n%s' % stderr.strip()
+
+ if stdout:
+ message += '\n>>> Standard Output\n%s' % stdout.strip()
+
+ super(SubprocessError, self).__init__(message)
+
+
+def log(message, verbosity=0): # type: (str, int) -> None
+ """Log a message to the console if the verbosity is high enough."""
+ if verbosity > VERBOSITY:
+ return
+
+ print(message, file=CONSOLE)
+ CONSOLE.flush()
+
+
+def execute_command(cmd, cwd=None, capture=False, env=None): # type: (t.List[str], t.Optional[str], bool, t.Optional[t.Dict[str, str]]) -> None
+ """Execute the specified command."""
+ log('Execute command: %s' % ' '.join(cmd_quote(c) for c in cmd), verbosity=1)
+
+ cmd_bytes = [to_bytes(c) for c in cmd]
+
+ if capture:
+ stdout = subprocess.PIPE
+ stderr = subprocess.PIPE
+ else:
+ stdout = None
+ stderr = None
+
+ cwd_bytes = to_optional_bytes(cwd)
+ process = subprocess.Popen(cmd_bytes, cwd=cwd_bytes, stdin=devnull(), stdout=stdout, stderr=stderr, env=env) # pylint: disable=consider-using-with
+ stdout_bytes, stderr_bytes = process.communicate()
+ stdout_text = to_optional_text(stdout_bytes) or u''
+ stderr_text = to_optional_text(stderr_bytes) or u''
+
+ if process.returncode != 0:
+ raise SubprocessError(cmd, process.returncode, stdout_text, stderr_text)
+
+
+def write_text_file(path, content, create_directories=False): # type: (str, str, bool) -> None
+ """Write the given text content to the specified path, optionally creating missing directories."""
+ if create_directories:
+ make_dirs(os.path.dirname(path))
+
+ with open_binary_file(path, 'wb') as file_obj:
+ file_obj.write(to_bytes(content))
+
+
+def remove_tree(path): # type: (str) -> None
+ """Remove the specified directory tree."""
+ try:
+ shutil.rmtree(to_bytes(path))
+ except OSError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+
+def make_dirs(path): # type: (str) -> None
+ """Create a directory at path, including any necessary parent directories."""
+ try:
+ os.makedirs(to_bytes(path))
+ except OSError as ex:
+ if ex.errno != errno.EEXIST:
+ raise
+
+
+def open_binary_file(path, mode='rb'): # type: (str, str) -> t.IO[bytes]
+ """Open the given path for binary access."""
+ if 'b' not in mode:
+ raise Exception('mode must include "b" for binary files: %s' % mode)
+
+ return io.open(to_bytes(path), mode) # pylint: disable=consider-using-with,unspecified-encoding
+
+
+def to_optional_bytes(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[bytes]
+ """Return the given value as bytes encoded using UTF-8 if not already bytes, or None if the value is None."""
+ return None if value is None else to_bytes(value, errors)
+
+
+def to_optional_text(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[t.Text]
+ """Return the given value as text decoded using UTF-8 if not already text, or None if the value is None."""
+ return None if value is None else to_text(value, errors)
+
+
+def to_bytes(value, errors='strict'): # type: (t.AnyStr, str) -> bytes
+ """Return the given value as bytes encoded using UTF-8 if not already bytes."""
+ if isinstance(value, bytes):
+ return value
+
+ if isinstance(value, Text):
+ return value.encode(ENCODING, errors)
+
+ raise Exception('value is not bytes or text: %s' % type(value))
+
+
+def to_text(value, errors='strict'): # type: (t.AnyStr, str) -> t.Text
+ """Return the given value as text decoded using UTF-8 if not already text."""
+ if isinstance(value, bytes):
+ return value.decode(ENCODING, errors)
+
+ if isinstance(value, Text):
+ return value
+
+ raise Exception('value is not bytes or text: %s' % type(value))
+
+
+PAYLOAD = b'{payload}' # base-64 encoded JSON payload which will be populated before this script is executed
+
+if __name__ == '__main__':
+ main()
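
The {payload} placeholder is populated by the controller before this script executes: a JSON document holding the verbosity, the pip wrapper script, and the (command, options) pairs dispatched by main() is base64-encoded and substituted into the source. A sketch of how such a payload could be assembled; the actual controller-side code lives elsewhere in ansible-test, and the file names here are assumptions:

    """Illustrative construction of the base64 JSON payload consumed by requirements.py."""
    import base64
    import json

    payload = dict(
        verbosity=1,
        script=open('quiet_pip.py').read(),  # the pip wrapper written to a temp file on the host
        commands=[
            ('version', {}),  # each entry dispatches to the same-named function via globals()
            ('install', dict(requirements=[], constraints=[], packages=['pyyaml'])),
        ],
    )

    encoded = base64.b64encode(json.dumps(payload).encode('utf-8')).decode('ascii')

    script = open('requirements.py').read().replace('{payload}', encoded)
    # 'script' can now be sent to the host and executed with the target Python.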
diff --git a/test/lib/ansible_test/_util/target/tools/virtualenvcheck.py b/test/lib/ansible_test/_util/target/tools/virtualenvcheck.py
new file mode 100644
index 0000000..a38ad07
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/tools/virtualenvcheck.py
@@ -0,0 +1,21 @@
+"""Detect the real python interpreter when running in a virtual environment created by the 'virtualenv' module."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+try:
+ # virtualenv <20
+ from sys import real_prefix
+except ImportError:
+ real_prefix = None
+
+try:
+ # venv and virtualenv >= 20
+ from sys import base_exec_prefix
+except ImportError:
+ base_exec_prefix = None
+
+print(json.dumps(dict(
+ real_prefix=real_prefix or base_exec_prefix,
+)))
diff --git a/test/lib/ansible_test/_util/target/tools/yamlcheck.py b/test/lib/ansible_test/_util/target/tools/yamlcheck.py
new file mode 100644
index 0000000..dfd08e5
--- /dev/null
+++ b/test/lib/ansible_test/_util/target/tools/yamlcheck.py
@@ -0,0 +1,20 @@
+"""Show availability of PyYAML and libyaml support."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+try:
+ import yaml
+except ImportError:
+ yaml = None
+
+try:
+ from yaml import CLoader
+except ImportError:
+ CLoader = None
+
+print(json.dumps(dict(
+ yaml=bool(yaml),
+ cloader=bool(CLoader),
+)))
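
The probe is meant to be run under the interpreter being inspected, and the caller reads the JSON flags to decide whether PyYAML and libyaml support are present. A hedged driver sketch; the interpreter path is an assumption:

    """Illustrative driver for yamlcheck.py; the interpreter path is an assumption."""
    import json
    import subprocess

    output = subprocess.check_output(['/usr/bin/python3', 'yamlcheck.py'])
    flags = json.loads(output)

    if not flags['yaml']:
        print('PyYAML is not installed for this interpreter')
    elif not flags['cloader']:
        print('PyYAML is installed without libyaml support')
    else:
        print('PyYAML with libyaml support is available')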
diff --git a/test/lib/ansible_test/config/cloud-config-aws.ini.template b/test/lib/ansible_test/config/cloud-config-aws.ini.template
new file mode 100644
index 0000000..88b9fea
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-aws.ini.template
@@ -0,0 +1,26 @@
+# This is the configuration template for ansible-test AWS integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned AWS credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary AWS credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of AWS credentials requires an ansible-core-ci API key.
+
+[default]
+aws_access_key: @ACCESS_KEY
+aws_secret_key: @SECRET_KEY
+security_token: @SECURITY_TOKEN
+aws_region: @REGION
+# aws_cleanup controls whether the environment is cleaned up after tests have completed
+# This only applies to tests that have a cleanup stage
+# Defaults to true when using this template
+# aws_cleanup: true
+# aliases for backwards compatibility with older integration test playbooks
+ec2_access_key: {{ aws_access_key }}
+ec2_secret_key: {{ aws_secret_key }}
+ec2_region: {{ aws_region }}
diff --git a/test/lib/ansible_test/config/cloud-config-azure.ini.template b/test/lib/ansible_test/config/cloud-config-azure.ini.template
new file mode 100644
index 0000000..766553d
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-azure.ini.template
@@ -0,0 +1,30 @@
+# This is the configuration template for ansible-test Azure integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned Azure credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary Azure credentials,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of Azure credentials requires an ansible-core-ci API key in ~/.ansible-core-ci.key
+
+[default]
+# Provide either Service Principal or Active Directory credentials below.
+
+# Service Principal
+AZURE_CLIENT_ID:
+AZURE_SECRET:
+AZURE_SUBSCRIPTION_ID:
+AZURE_TENANT:
+
+# Active Directory
+AZURE_AD_USER:
+AZURE_PASSWORD:
+AZURE_SUBSCRIPTION_ID:
+
+# Resource Groups
+RESOURCE_GROUP:
+RESOURCE_GROUP_SECONDARY:
diff --git a/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template b/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template
new file mode 100644
index 0000000..1c99e9b
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template
@@ -0,0 +1,9 @@
+# This is the configuration template for ansible-test cloudscale integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+#
+
+[default]
+cloudscale_api_token = @API_TOKEN
diff --git a/test/lib/ansible_test/config/cloud-config-cs.ini.template b/test/lib/ansible_test/config/cloud-config-cs.ini.template
new file mode 100644
index 0000000..f8d8a91
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-cs.ini.template
@@ -0,0 +1,18 @@
+# This is the configuration template for ansible-test CloudStack integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned cloudstack-sim docker container in ansible-test.
+#
+# If you do not want to use the automatically provided CloudStack simulator,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration and not launch the simulator.
+#
+# It is recommended that you DO NOT use this template unless you cannot use the simulator.
+
+[default]
+endpoint = http://@HOST:@PORT/client/api
+key = @KEY
+secret = @SECRET
+timeout = 60
diff --git a/test/lib/ansible_test/config/cloud-config-gcp.ini.template b/test/lib/ansible_test/config/cloud-config-gcp.ini.template
new file mode 100644
index 0000000..00a2097
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-gcp.ini.template
@@ -0,0 +1,18 @@
+# This is the configuration template for ansible-test GCP integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provided GCP simulator in ansible-test.
+#
+# If you do not want to use the automatically provided GCP simulator,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration and not launch the simulator.
+#
+# It is recommended that you DO NOT use this template unless you cannot use the simulator.
+
+[default]
+gcp_project: @PROJECT
+gcp_cred_file: @CRED_FILE
+gcp_cred_kind: @CRED_KIND
+gcp_cred_email: @CRED_EMAIL
diff --git a/test/lib/ansible_test/config/cloud-config-hcloud.ini.template b/test/lib/ansible_test/config/cloud-config-hcloud.ini.template
new file mode 100644
index 0000000..8db658d
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-hcloud.ini.template
@@ -0,0 +1,15 @@
+# This is the configuration template for ansible-test Hetzner Cloud integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned Hetzner Cloud credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary Hetzner Cloud credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of Hetzner Cloud credentials requires an ansible-core-ci API key.
+
+[default]
+hcloud_api_token = @TOKEN
diff --git a/test/lib/ansible_test/config/cloud-config-opennebula.ini.template b/test/lib/ansible_test/config/cloud-config-opennebula.ini.template
new file mode 100644
index 0000000..00c56db
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-opennebula.ini.template
@@ -0,0 +1,20 @@
+# This is the configuration template for ansible-test OpenNebula integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Running integration tests against previously recorded XMLRPC fixtures.
+#
+# If you want to test against a live OpenNebula platform,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration.
+#
+# If you run with @FIXTURES enabled (true), you can decide whether to
+# run in @REPLAY mode (true) or record mode (false).
+
+[default]
+opennebula_url: @URL
+opennebula_username: @USERNAME
+opennebula_password: @PASSWORD
+opennebula_test_fixture: @FIXTURES
+opennebula_test_fixture_replay: @REPLAY
\ No newline at end of file
diff --git a/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template b/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template
new file mode 100644
index 0000000..0a10f23
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template
@@ -0,0 +1,12 @@
+# This is the configuration template for ansible-test OpenShift integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned openshift-origin docker container in ansible-test.
+#
+# If you do not want to use the automatically provided OpenShift container,
+# place your kubeconfig file next to this file, with the same name, but without the .template extension.
+# This will cause ansible-test to use the given configuration and not launch the automatically provided container.
+#
+# It is recommended that you DO NOT use this template unless you cannot use the automatically provided container.
diff --git a/test/lib/ansible_test/config/cloud-config-scaleway.ini.template b/test/lib/ansible_test/config/cloud-config-scaleway.ini.template
new file mode 100644
index 0000000..f10419e
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-scaleway.ini.template
@@ -0,0 +1,13 @@
+# This is the configuration template for ansible-test Scaleway integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+#
+# If you want to test against the Scaleway public API,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration.
+
+[default]
+key = @KEY
+org = @ORG
diff --git a/test/lib/ansible_test/config/cloud-config-vcenter.ini.template b/test/lib/ansible_test/config/cloud-config-vcenter.ini.template
new file mode 100644
index 0000000..eff8bf7
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-vcenter.ini.template
@@ -0,0 +1,26 @@
+# This is the configuration template for ansible-test VMware integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned VMware credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary VMware credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of VMware credentials requires an ansible-core-ci API key.
+
+[DEFAULT]
+vcenter_username: @VMWARE_USERNAME
+vcenter_password: @VMWARE_PASSWORD
+vcenter_hostname: @VMWARE_HOSTNAME
+vmware_validate_certs: @VMWARE_VALIDATE_CERTS
+esxi1_username: @ESXI1_USERNAME
+esxi1_hostname: @ESXI1_HOSTNAME
+esxi1_password: @ESXI1_PASSWORD
+esxi2_username: @ESXI2_USERNAME
+esxi2_hostname: @ESXI2_HOSTNAME
+esxi2_password: @ESXI2_PASSWORD
+vmware_proxy_host: @VMWARE_PROXY_HOST
+vmware_proxy_port: @VMWARE_PROXY_PORT
diff --git a/test/lib/ansible_test/config/cloud-config-vultr.ini.template b/test/lib/ansible_test/config/cloud-config-vultr.ini.template
new file mode 100644
index 0000000..48b8210
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-vultr.ini.template
@@ -0,0 +1,12 @@
+# This is the configuration template for ansible-test Vultr integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+#
+# If you want to test against the Vultr public API,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration.
+
+[default]
+key = @KEY
diff --git a/test/lib/ansible_test/config/config.yml b/test/lib/ansible_test/config/config.yml
new file mode 100644
index 0000000..9fca7af
--- /dev/null
+++ b/test/lib/ansible_test/config/config.yml
@@ -0,0 +1,41 @@
+# Sample ansible-test configuration file for collections.
+# Support for this feature was first added in ansible-core 2.12.
+# Use of this file is optional.
+# If used, this file must be placed in "tests/config.yml" relative to the base of the collection.
+
+modules:
+ # Configuration for modules/module_utils.
+ # These settings do not apply to other content in the collection.
+
+ python_requires: default
+ # Python versions supported by modules/module_utils.
+ # This setting is required.
+ #
+ # Possible values:
+ #
+ # - 'default' - All Python versions supported by Ansible.
+ # This is the default value if no configuration is provided.
+ # - 'controller' - All Python versions supported by the Ansible controller.
+ # This indicates the modules/module_utils can only run on the controller.
+ # Intended for use only with modules/module_utils that depend on ansible-connection, which only runs on the controller.
+ # Unit tests for modules/module_utils will be permitted to import any Ansible code, instead of only module_utils.
+ # - SpecifierSet - A PEP 440 specifier set indicating the supported Python versions.
+ # This is only needed when modules/module_utils do not support all Python versions supported by Ansible.
+ # It is not necessary to exclude versions which Ansible does not support, as this will be done automatically.
+ #
+ # What does this affect?
+ #
+ # - Unit tests will be skipped on any unsupported Python version.
+ # - Sanity tests that are Python version specific will be skipped on any unsupported Python version that is not supported by the controller.
+ #
+ # Sanity tests that are Python version specific will always be executed for Python versions supported by the controller, regardless of this setting.
+ # Reasons for this restriction include, but are not limited to:
+ #
+ # - AnsiballZ must be able to AST parse modules/module_utils on the controller, even though they may execute on a managed node.
+ # - ansible-doc must be able to AST parse modules/module_utils on the controller to display documentation.
+ # - ansible-test must be able to AST parse modules/module_utils to perform static analysis on them.
+ # - ansible-test must be able to execute portions of modules/module_utils to validate their argument specs.
+ #
+ # These settings only apply to modules/module_utils.
+ # It is not possible to declare supported Python versions for controller-only code.
+ # All Python versions supported by the controller must be supported by controller-only code.
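
For the SpecifierSet form described above, the value is interpreted per PEP 440. A short illustration using the 'packaging' library, which implements PEP 440 specifier sets; the requirement shown is hypothetical:

    """Illustration of a PEP 440 specifier set like those accepted by python_requires."""
    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet('>=3.8,!=3.9.*')  # hypothetical collection requirement

    for version in ('3.8', '3.9', '3.10'):
        print(version, version in spec)  # 3.8 True, 3.9 False, 3.10 True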
diff --git a/test/lib/ansible_test/config/inventory.networking.template b/test/lib/ansible_test/config/inventory.networking.template
new file mode 100644
index 0000000..a154568
--- /dev/null
+++ b/test/lib/ansible_test/config/inventory.networking.template
@@ -0,0 +1,42 @@
+# This is the configuration template for ansible-test network-integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the `--platform` option to provision temporary network instances on EC2.
+#
+# If you do not want to use the automatically provisioned temporary network instances,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+#
+# NOTE: Automatic provisioning of network instances on EC2 requires an ansible-core-ci API key.
+
+[@OS]
+@NAME ansible_connection="local" ansible_host=@HOST ansible_network_os="@OS" ansible_user="@USER" ansible_ssh_private_key_file="@KEY_FILE"
+
+[aci:vars]
+aci_hostname=your-apic-1
+aci_username=admin
+aci_password=your-apic-password
+aci_validate_certs=no
+aci_use_ssl=yes
+aci_use_proxy=no
+
+[aci]
+localhost ansible_ssh_host=127.0.0.1 ansible_connection=local
+
+[mso:vars]
+mso_hostname=your-mso-1
+mso_username=admin
+mso_password=your-mso-password
+mso_validate_certs=no
+mso_use_ssl=yes
+mso_use_proxy=no
+
+[mso]
+localhost ansible_ssh_host=127.0.0.1 ansible_connection=local
+
+###
+# Example
+#
+# [vyos]
+# vyos01.example.net ansible_connection=local ansible_network_os="vyos" ansible_user=admin ansible_ssh_pass=mypassword
diff --git a/test/lib/ansible_test/config/inventory.winrm.template b/test/lib/ansible_test/config/inventory.winrm.template
new file mode 100644
index 0000000..34bbee2
--- /dev/null
+++ b/test/lib/ansible_test/config/inventory.winrm.template
@@ -0,0 +1,28 @@
+# This is the configuration template for ansible-test windows-integration tests.
+# It can also be used with the legacy `make` based method of running tests.
+#
+# You do not need this template if you are:
+#
+# 1) Using the `--windows` option to provision temporary Windows instances on EC2.
+#
+# If you do not want to use the automatically provisioned temporary Windows instances,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+#
+# NOTE: Automatic provisioning of Windows instances on EC2 requires an ansible-core-ci API key.
+#
+# REMINDER: Standard ports for winrm are 5985 (HTTP) and 5986 (HTTPS).
+
+[windows]
+@NAME ansible_host=@HOST ansible_user=@USER ansible_password=@PASSWORD ansible_port=@PORT
+
+[windows:vars]
+ansible_connection=winrm
+ansible_winrm_server_cert_validation=ignore
+
+# support winrm connection tests (temporary solution, does not support testing enable/disable of pipelining)
+[winrm:children]
+windows
+
+# support tests that target testhost
+[testhost:children]
+windows