author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
commit      a453ac31f3428614cceb99027f8efbdb9258a40b (patch)
tree        f61f87408f32a8511cbd91799f9cececb53e0374 /test/lib/ansible_test
parent      Initial commit. (diff)
download    ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.tar.xz
            ansible-a453ac31f3428614cceb99027f8efbdb9258a40b.zip

Adding upstream version 2.10.7+merged+base+2.10.8+dfsg. (upstream/2.10.7+merged+base+2.10.8+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'test/lib/ansible_test')
-rw-r--r--  test/lib/ansible_test/__init__.py | 0
-rw-r--r--  test/lib/ansible_test/_data/ansible.cfg | 0
-rwxr-xr-x  test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py | 28
-rw-r--r--  test/lib/ansible_test/_data/collection_detail.py | 95
-rw-r--r--  test/lib/ansible_test/_data/completion/docker.txt | 13
-rw-r--r--  test/lib/ansible_test/_data/completion/network.txt | 2
-rw-r--r--  test/lib/ansible_test/_data/completion/remote.txt | 12
-rw-r--r--  test/lib/ansible_test/_data/completion/windows.txt | 6
-rw-r--r--  test/lib/ansible_test/_data/coveragerc | 0
-rw-r--r--  test/lib/ansible_test/_data/cryptography-constraints.txt | 3
l---------  test/lib/ansible_test/_data/injector/ansible | 1
l---------  test/lib/ansible_test/_data/injector/ansible-config | 1
l---------  test/lib/ansible_test/_data/injector/ansible-connection | 1
l---------  test/lib/ansible_test/_data/injector/ansible-console | 1
l---------  test/lib/ansible_test/_data/injector/ansible-doc | 1
l---------  test/lib/ansible_test/_data/injector/ansible-galaxy | 1
l---------  test/lib/ansible_test/_data/injector/ansible-inventory | 1
l---------  test/lib/ansible_test/_data/injector/ansible-playbook | 1
l---------  test/lib/ansible_test/_data/injector/ansible-pull | 1
l---------  test/lib/ansible_test/_data/injector/ansible-test | 1
l---------  test/lib/ansible_test/_data/injector/ansible-vault | 1
l---------  test/lib/ansible_test/_data/injector/importer.py | 1
l---------  test/lib/ansible_test/_data/injector/pytest | 1
-rwxr-xr-x  test/lib/ansible_test/_data/injector/python.py | 80
-rw-r--r--  test/lib/ansible_test/_data/injector/virtualenv-isolated.sh | 18
-rw-r--r--  test/lib/ansible_test/_data/injector/virtualenv.sh | 14
-rw-r--r--  test/lib/ansible_test/_data/inventory | 6
-rw-r--r--  test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml | 19
-rw-r--r--  test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml | 77
-rw-r--r--  test/lib/ansible_test/_data/pytest.ini | 9
-rw-r--r--  test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py | 67
-rw-r--r--  test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py | 68
-rw-r--r--  test/lib/ansible_test/_data/quiet_pip.py | 70
-rw-r--r--  test/lib/ansible_test/_data/requirements/ansible-test.txt | 6
-rw-r--r--  test/lib/ansible_test/_data/requirements/constraints.txt | 64
-rw-r--r--  test/lib/ansible_test/_data/requirements/coverage.txt | 1
-rw-r--r--  test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt | 3
-rw-r--r--  test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt | 39
-rw-r--r--  test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt | 2
-rw-r--r--  test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt | 1
-rw-r--r--  test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt | 1
-rw-r--r--  test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt | 1
-rw-r--r--  test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt | 1
-rw-r--r--  test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt | 2
-rw-r--r--  test/lib/ansible_test/_data/requirements/integration.txt | 6
-rw-r--r--  test/lib/ansible_test/_data/requirements/network-integration.txt | 7
-rw-r--r--  test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt | 2
-rw-r--r--  test/lib/ansible_test/_data/requirements/sanity.changelog.txt | 2
-rw-r--r--  test/lib/ansible_test/_data/requirements/sanity.import.txt | 2
-rw-r--r--  test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt | 1
-rw-r--r--  test/lib/ansible_test/_data/requirements/sanity.pep8.txt | 1
-rwxr-xr-x  test/lib/ansible_test/_data/requirements/sanity.ps1 | 45
-rw-r--r--  test/lib/ansible_test/_data/requirements/sanity.pylint.txt | 3
-rw-r--r--  test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt | 1
-rw-r--r--  test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt | 2
-rw-r--r--  test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt | 3
-rw-r--r--  test/lib/ansible_test/_data/requirements/sanity.yamllint.txt | 1
-rw-r--r--  test/lib/ansible_test/_data/requirements/units.txt | 7
-rw-r--r--  test/lib/ansible_test/_data/requirements/windows-integration.txt | 11
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json | 13
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py | 68
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/changelog.json | 9
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/changelog.py | 49
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/empty-init.json | 14
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/empty-init.py | 16
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json | 6
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py | 46
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/line-endings.json | 4
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/line-endings.py | 18
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json | 6
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py | 44
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/no-assert.json | 10
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/no-assert.py | 24
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json | 7
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py | 21
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json | 7
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py | 21
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.json | 7
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.py | 21
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.json | 7
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.py | 21
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.json | 7
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.py | 28
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.json | 5
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py | 82
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json | 10
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py | 21
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json | 5
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py | 28
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json | 7
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py | 21
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json | 7
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py | 21
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json | 11
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py | 150
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/shebang.json | 4
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/shebang.py | 120
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/symlinks.json | 5
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/symlinks.py | 32
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json | 10
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py | 21
-rw-r--r--  test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json | 6
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py | 21
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/compile/compile.py | 41
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/import/importer.py | 467
-rw-r--r--  test/lib/ansible_test/_data/sanity/import/yaml_to_json.py | 27
-rw-r--r--  test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py | 15
-rw-r--r--  test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt | 4
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/pslint/pslint.ps1 | 43
-rw-r--r--  test/lib/ansible_test/_data/sanity/pslint/settings.psd1 | 13
-rw-r--r--  test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg | 39
-rw-r--r--  test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg | 135
-rw-r--r--  test/lib/ansible_test/_data/sanity/pylint/config/default.cfg | 135
-rw-r--r--  test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg | 42
-rw-r--r--  test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py | 250
-rw-r--r--  test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py | 90
-rw-r--r--  test/lib/ansible_test/_data/sanity/pylint/plugins/unwanted.py | 242
-rw-r--r--  test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt | 5
-rw-r--r--  test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt | 3
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/validate-modules/main.py | 8
l---------  test/lib/ansible_test/_data/sanity/validate-modules/validate-modules | 1
-rw-r--r--  test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py | 20
-rw-r--r--  test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py | 2444
-rw-r--r--  test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py | 170
-rwxr-xr-x  test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1 | 110
-rw-r--r--  test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py | 488
-rw-r--r--  test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py | 218
-rw-r--r--  test/lib/ansible_test/_data/sanity/yamllint/config/default.yml | 19
-rw-r--r--  test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml | 19
-rw-r--r--  test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml | 19
-rw-r--r--  test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py | 249
-rw-r--r--  test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1 | 453
-rw-r--r--  test/lib/ansible_test/_data/setup/docker.sh | 14
-rw-r--r--  test/lib/ansible_test/_data/setup/remote.sh | 159
-rw-r--r--  test/lib/ansible_test/_data/setup/windows-httptester.ps1 | 228
-rwxr-xr-x  test/lib/ansible_test/_data/sslcheck.py | 24
-rwxr-xr-x  test/lib/ansible_test/_data/versions.py | 20
-rwxr-xr-x  test/lib/ansible_test/_data/virtualenvcheck.py | 16
-rwxr-xr-x  test/lib/ansible_test/_data/yamlcheck.py | 21
-rw-r--r--  test/lib/ansible_test/_internal/__init__.py | 3
-rw-r--r--  test/lib/ansible_test/_internal/ansible_util.py | 296
-rw-r--r--  test/lib/ansible_test/_internal/cache.py | 35
-rw-r--r--  test/lib/ansible_test/_internal/ci/__init__.py | 227
-rw-r--r--  test/lib/ansible_test/_internal/ci/azp.py | 268
-rw-r--r--  test/lib/ansible_test/_internal/ci/local.py | 217
-rw-r--r--  test/lib/ansible_test/_internal/ci/shippable.py | 269
-rw-r--r--  test/lib/ansible_test/_internal/classification.py | 977
-rw-r--r--  test/lib/ansible_test/_internal/cli.py | 1217
-rw-r--r--  test/lib/ansible_test/_internal/cloud/__init__.py | 429
-rw-r--r--  test/lib/ansible_test/_internal/cloud/acme.py | 193
-rw-r--r--  test/lib/ansible_test/_internal/cloud/aws.py | 124
-rw-r--r--  test/lib/ansible_test/_internal/cloud/azure.py | 213
-rw-r--r--  test/lib/ansible_test/_internal/cloud/cloudscale.py | 80
-rw-r--r--  test/lib/ansible_test/_internal/cloud/cs.py | 300
-rw-r--r--  test/lib/ansible_test/_internal/cloud/fallaxy.py | 177
-rw-r--r--  test/lib/ansible_test/_internal/cloud/foreman.py | 191
-rw-r--r--  test/lib/ansible_test/_internal/cloud/gcp.py | 62
-rw-r--r--  test/lib/ansible_test/_internal/cloud/hcloud.py | 116
-rw-r--r--  test/lib/ansible_test/_internal/cloud/nios.py | 193
-rw-r--r--  test/lib/ansible_test/_internal/cloud/opennebula.py | 66
-rw-r--r--  test/lib/ansible_test/_internal/cloud/openshift.py | 236
-rw-r--r--  test/lib/ansible_test/_internal/cloud/scaleway.py | 72
-rw-r--r--  test/lib/ansible_test/_internal/cloud/tower.py | 255
-rw-r--r--  test/lib/ansible_test/_internal/cloud/vcenter.py | 232
-rw-r--r--  test/lib/ansible_test/_internal/cloud/vultr.py | 71
-rw-r--r--  test/lib/ansible_test/_internal/config.py | 356
-rw-r--r--  test/lib/ansible_test/_internal/constants.py | 10
-rw-r--r--  test/lib/ansible_test/_internal/core_ci.py | 680
-rw-r--r--  test/lib/ansible_test/_internal/coverage/__init__.py | 325
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/__init__.py | 19
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py | 154
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py | 64
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py | 39
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py | 104
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py | 146
-rw-r--r--  test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py | 109
-rw-r--r--  test/lib/ansible_test/_internal/coverage/combine.py | 303
-rw-r--r--  test/lib/ansible_test/_internal/coverage/erase.py | 27
-rw-r--r--  test/lib/ansible_test/_internal/coverage/html.py | 45
-rw-r--r--  test/lib/ansible_test/_internal/coverage/report.py | 156
-rw-r--r--  test/lib/ansible_test/_internal/coverage/xml.py | 191
-rw-r--r--  test/lib/ansible_test/_internal/coverage_util.py | 125
-rw-r--r--  test/lib/ansible_test/_internal/csharp_import_analysis.py | 106
-rw-r--r--  test/lib/ansible_test/_internal/data.py | 200
-rw-r--r--  test/lib/ansible_test/_internal/delegation.py | 667
-rw-r--r--  test/lib/ansible_test/_internal/diff.py | 256
-rw-r--r--  test/lib/ansible_test/_internal/docker_util.py | 409
-rw-r--r--  test/lib/ansible_test/_internal/encoding.py | 41
-rw-r--r--  test/lib/ansible_test/_internal/env.py | 293
-rw-r--r--  test/lib/ansible_test/_internal/executor.py | 2146
-rw-r--r--  test/lib/ansible_test/_internal/git.py | 137
-rw-r--r--  test/lib/ansible_test/_internal/http.py | 181
-rw-r--r--  test/lib/ansible_test/_internal/import_analysis.py | 362
-rw-r--r--  test/lib/ansible_test/_internal/init.py | 16
-rw-r--r--  test/lib/ansible_test/_internal/integration/__init__.py | 349
-rw-r--r--  test/lib/ansible_test/_internal/io.py | 94
-rw-r--r--  test/lib/ansible_test/_internal/manage_ci.py | 335
-rw-r--r--  test/lib/ansible_test/_internal/metadata.py | 151
-rw-r--r--  test/lib/ansible_test/_internal/payload.py | 146
-rw-r--r--  test/lib/ansible_test/_internal/powershell_import_analysis.py | 105
-rw-r--r--  test/lib/ansible_test/_internal/provider/__init__.py | 78
-rw-r--r--  test/lib/ansible_test/_internal/provider/layout/__init__.py | 232
-rw-r--r--  test/lib/ansible_test/_internal/provider/layout/ansible.py | 47
-rw-r--r--  test/lib/ansible_test/_internal/provider/layout/collection.py | 123
-rw-r--r--  test/lib/ansible_test/_internal/provider/source/__init__.py | 18
-rw-r--r--  test/lib/ansible_test/_internal/provider/source/git.py | 72
-rw-r--r--  test/lib/ansible_test/_internal/provider/source/installed.py | 43
-rw-r--r--  test/lib/ansible_test/_internal/provider/source/unversioned.py | 87
-rw-r--r--  test/lib/ansible_test/_internal/sanity/__init__.py | 946
-rw-r--r--  test/lib/ansible_test/_internal/sanity/ansible_doc.py | 144
-rw-r--r--  test/lib/ansible_test/_internal/sanity/bin_symlinks.py | 110
-rw-r--r--  test/lib/ansible_test/_internal/sanity/compile.py | 92
-rw-r--r--  test/lib/ansible_test/_internal/sanity/ignores.py | 89
-rw-r--r--  test/lib/ansible_test/_internal/sanity/import.py | 184
-rw-r--r--  test/lib/ansible_test/_internal/sanity/integration_aliases.py | 399
-rw-r--r--  test/lib/ansible_test/_internal/sanity/pep8.py | 109
-rw-r--r--  test/lib/ansible_test/_internal/sanity/pslint.py | 121
-rw-r--r--  test/lib/ansible_test/_internal/sanity/pylint.py | 289
-rw-r--r--  test/lib/ansible_test/_internal/sanity/rstcheck.py | 95
-rw-r--r--  test/lib/ansible_test/_internal/sanity/sanity_docs.py | 62
-rw-r--r--  test/lib/ansible_test/_internal/sanity/shellcheck.py | 110
-rw-r--r--  test/lib/ansible_test/_internal/sanity/validate_modules.py | 149
-rw-r--r--  test/lib/ansible_test/_internal/sanity/yamllint.py | 136
-rw-r--r--  test/lib/ansible_test/_internal/target.py | 694
-rw-r--r--  test/lib/ansible_test/_internal/test.py | 524
-rw-r--r--  test/lib/ansible_test/_internal/thread.py | 57
-rw-r--r--  test/lib/ansible_test/_internal/types.py | 32
-rw-r--r--  test/lib/ansible_test/_internal/units/__init__.py | 159
-rw-r--r--  test/lib/ansible_test/_internal/util.py | 853
-rw-r--r--  test/lib/ansible_test/_internal/util_common.py | 487
-rw-r--r--  test/lib/ansible_test/_internal/venv.py | 227
-rw-r--r--  test/lib/ansible_test/config/cloud-config-aws.ini.template | 26
-rw-r--r--  test/lib/ansible_test/config/cloud-config-azure.ini.template | 32
-rw-r--r--  test/lib/ansible_test/config/cloud-config-cloudscale.ini.template | 9
-rw-r--r--  test/lib/ansible_test/config/cloud-config-cs.ini.template | 18
-rw-r--r--  test/lib/ansible_test/config/cloud-config-gcp.ini.template | 18
-rw-r--r--  test/lib/ansible_test/config/cloud-config-hcloud.ini.template | 15
-rw-r--r--  test/lib/ansible_test/config/cloud-config-opennebula.ini.template | 20
-rw-r--r--  test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template | 12
-rw-r--r--  test/lib/ansible_test/config/cloud-config-scaleway.ini.template | 13
-rw-r--r--  test/lib/ansible_test/config/cloud-config-tower.ini.template | 18
-rw-r--r--  test/lib/ansible_test/config/cloud-config-vcenter.ini.template | 26
-rw-r--r--  test/lib/ansible_test/config/cloud-config-vultr.ini.template | 12
-rw-r--r--  test/lib/ansible_test/config/inventory.networking.template | 42
-rw-r--r--  test/lib/ansible_test/config/inventory.winrm.template | 28
245 files changed, 30550 insertions, 0 deletions
diff --git a/test/lib/ansible_test/__init__.py b/test/lib/ansible_test/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/lib/ansible_test/__init__.py
diff --git a/test/lib/ansible_test/_data/ansible.cfg b/test/lib/ansible_test/_data/ansible.cfg
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/lib/ansible_test/_data/ansible.cfg
diff --git a/test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py b/test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py
new file mode 100755
index 00000000..d12b6334
--- /dev/null
+++ b/test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# PYTHON_ARGCOMPLETE_OK
+"""Command line entry point for ansible-test."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+    """Main program entry point."""
+    ansible_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    source_root = os.path.join(ansible_root, 'test', 'lib')
+
+    if os.path.exists(os.path.join(source_root, 'ansible_test', '_internal', 'cli.py')):
+        # running from source, use that version of ansible-test instead of any version that may already be installed
+        sys.path.insert(0, source_root)
+
+    # noinspection PyProtectedMember
+    from ansible_test._internal.cli import main as cli_main
+
+    cli_main()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/test/lib/ansible_test/_data/collection_detail.py b/test/lib/ansible_test/_data/collection_detail.py
new file mode 100644
index 00000000..e7c883ca
--- /dev/null
+++ b/test/lib/ansible_test/_data/collection_detail.py
@@ -0,0 +1,95 @@
+"""Retrieve collection detail."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import sys
+
+import yaml
+
+
+# See semantic versioning specification (https://semver.org/)
+NUMERIC_IDENTIFIER = r'(?:0|[1-9][0-9]*)'
+ALPHANUMERIC_IDENTIFIER = r'(?:[0-9]*[a-zA-Z-][a-zA-Z0-9-]*)'
+
+PRE_RELEASE_IDENTIFIER = r'(?:' + NUMERIC_IDENTIFIER + r'|' + ALPHANUMERIC_IDENTIFIER + r')'
+BUILD_IDENTIFIER = r'[a-zA-Z0-9-]+' # equivalent to r'(?:[0-9]+|' + ALPHANUMERIC_IDENTIFIER + r')'
+
+VERSION_CORE = NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER + r'\.' + NUMERIC_IDENTIFIER
+PRE_RELEASE = r'(?:-' + PRE_RELEASE_IDENTIFIER + r'(?:\.' + PRE_RELEASE_IDENTIFIER + r')*)?'
+BUILD = r'(?:\+' + BUILD_IDENTIFIER + r'(?:\.' + BUILD_IDENTIFIER + r')*)?'
+
+SEMVER_REGULAR_EXPRESSION = r'^' + VERSION_CORE + PRE_RELEASE + BUILD + r'$'
+
+
+def validate_version(version):
+    """Raise exception if the provided version is not None or a valid semantic version."""
+    if version is None:
+        return
+    if not re.match(SEMVER_REGULAR_EXPRESSION, version):
+        raise Exception('Invalid version number "{0}". Collection version numbers must '
+                        'follow semantic versioning (https://semver.org/).'.format(version))
+
+
+def read_manifest_json(collection_path):
+    """Return collection information from the MANIFEST.json file."""
+    manifest_path = os.path.join(collection_path, 'MANIFEST.json')
+
+    if not os.path.exists(manifest_path):
+        return None
+
+    try:
+        with open(manifest_path) as manifest_file:
+            manifest = json.load(manifest_file)
+
+        collection_info = manifest.get('collection_info') or dict()
+
+        result = dict(
+            version=collection_info.get('version'),
+        )
+        validate_version(result['version'])
+    except Exception as ex:  # pylint: disable=broad-except
+        raise Exception('{0}: {1}'.format(os.path.basename(manifest_path), ex))
+
+    return result
+
+
+def read_galaxy_yml(collection_path):
+    """Return collection information from the galaxy.yml file."""
+    galaxy_path = os.path.join(collection_path, 'galaxy.yml')
+
+    if not os.path.exists(galaxy_path):
+        return None
+
+    try:
+        with open(galaxy_path) as galaxy_file:
+            galaxy = yaml.safe_load(galaxy_file)
+
+        result = dict(
+            version=galaxy.get('version'),
+        )
+        validate_version(result['version'])
+    except Exception as ex:  # pylint: disable=broad-except
+        raise Exception('{0}: {1}'.format(os.path.basename(galaxy_path), ex))
+
+    return result
+
+
+def main():
+    """Retrieve collection detail."""
+    collection_path = sys.argv[1]
+
+    try:
+        result = read_manifest_json(collection_path) or read_galaxy_yml(collection_path) or dict()
+    except Exception as ex:  # pylint: disable=broad-except
+        result = dict(
+            error='{0}'.format(ex),
+        )
+
+    print(json.dumps(result))
+
+
+if __name__ == '__main__':
+    main()
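
The semver pattern assembled above can be exercised in isolation. A minimal sketch (mine, not part of the patch) showing which version strings validate_version accepts:

import re

# Same pattern pieces as collection_detail.py above, rebuilt for a standalone check.
NUMERIC = r'(?:0|[1-9][0-9]*)'
ALPHANUMERIC = r'(?:[0-9]*[a-zA-Z-][a-zA-Z0-9-]*)'
PRE_RELEASE_ID = r'(?:' + NUMERIC + r'|' + ALPHANUMERIC + r')'
BUILD_ID = r'[a-zA-Z0-9-]+'
SEMVER = (r'^' + NUMERIC + r'\.' + NUMERIC + r'\.' + NUMERIC +
          r'(?:-' + PRE_RELEASE_ID + r'(?:\.' + PRE_RELEASE_ID + r')*)?' +
          r'(?:\+' + BUILD_ID + r'(?:\.' + BUILD_ID + r')*)?$')

for version in ('1.0.0', '2.10.7', '1.0.0-beta.1+build.5', '1.0', '01.2.3'):
    print(version, bool(re.match(SEMVER, version)))
# 1.0.0 True / 2.10.7 True / 1.0.0-beta.1+build.5 True / 1.0 False / 01.2.3 False

The last two fail because the core must have three components and numeric identifiers may not have leading zeros.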
diff --git a/test/lib/ansible_test/_data/completion/docker.txt b/test/lib/ansible_test/_data/completion/docker.txt
new file mode 100644
index 00000000..3e4566dc
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/docker.txt
@@ -0,0 +1,13 @@
+default name=quay.io/ansible/default-test-container:2.9.0 python=3.6,2.6,2.7,3.5,3.7,3.8,3.9 seccomp=unconfined context=collection
+default name=quay.io/ansible/ansible-base-test-container:1.7.0 python=3.6,2.6,2.7,3.5,3.7,3.8,3.9 seccomp=unconfined context=ansible-base
+centos6 name=quay.io/ansible/centos6-test-container:1.26.0 python=2.6 seccomp=unconfined
+centos7 name=quay.io/ansible/centos7-test-container:1.17.0 python=2.7 seccomp=unconfined
+centos8 name=quay.io/ansible/centos8-test-container:1.21.0 python=3.6 seccomp=unconfined
+fedora30 name=quay.io/ansible/fedora30-test-container:1.17.0 python=3.7
+fedora31 name=quay.io/ansible/fedora31-test-container:1.17.0 python=3.7
+fedora32 name=quay.io/ansible/fedora32-test-container:1.17.0 python=3.8
+opensuse15py2 name=quay.io/ansible/opensuse15py2-test-container:1.21.0 python=2.7
+opensuse15 name=quay.io/ansible/opensuse15-test-container:1.21.0 python=3.6
+ubuntu1604 name=quay.io/ansible/ubuntu1604-test-container:1.21.0 python=2.7 seccomp=unconfined
+ubuntu1804 name=quay.io/ansible/ubuntu1804-test-container:1.21.0 python=3.6 seccomp=unconfined
+ubuntu2004 name=quay.io/ansible/ubuntu2004-test-container:1.21.0 python=3.8 seccomp=unconfined
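
Each entry above is a platform name followed by whitespace-separated key=value options. A rough sketch (my own, not ansible-test's actual parser) of how one line decomposes:

def parse_completion_entry(line):
    """Split one completion entry into its name and an options dict."""
    parts = line.split()
    return parts[0], dict(part.split('=', 1) for part in parts[1:])

name, options = parse_completion_entry(
    'centos8 name=quay.io/ansible/centos8-test-container:1.21.0 python=3.6 seccomp=unconfined')
print(name)                # centos8
print(options['python'])   # 3.6
print(options['seccomp'])  # unconfined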
diff --git a/test/lib/ansible_test/_data/completion/network.txt b/test/lib/ansible_test/_data/completion/network.txt
new file mode 100644
index 00000000..dca911f8
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/network.txt
@@ -0,0 +1,2 @@
+ios/csr1000v collection=cisco.ios connection=ansible.netcommon.network_cli
+vyos/1.1.8 collection=vyos.vyos connection=ansible.netcommon.network_cli
diff --git a/test/lib/ansible_test/_data/completion/remote.txt b/test/lib/ansible_test/_data/completion/remote.txt
new file mode 100644
index 00000000..dea4367b
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/remote.txt
@@ -0,0 +1,12 @@
+freebsd/11.1 python=2.7,3.6 python_dir=/usr/local/bin
+freebsd/12.1 python=3.6,2.7 python_dir=/usr/local/bin
+osx/10.11 python=2.7 python_dir=/usr/local/bin
+macos/10.15 python=3.8 python_dir=/usr/local/bin
+macos/11.1 python=3.9 python_dir=/usr/local/bin
+rhel/7.6 python=2.7
+rhel/7.8 python=2.7
+rhel/7.9 python=2.7
+rhel/8.1 python=3.6
+rhel/8.2 python=3.6
+aix/7.2 python=2.7 httptester=disabled temp-unicode=disabled pip-check=disabled
+power/centos/7 python=2.7
diff --git a/test/lib/ansible_test/_data/completion/windows.txt b/test/lib/ansible_test/_data/completion/windows.txt
new file mode 100644
index 00000000..a4f3bf58
--- /dev/null
+++ b/test/lib/ansible_test/_data/completion/windows.txt
@@ -0,0 +1,6 @@
+2008
+2008-R2
+2012
+2012-R2
+2016
+2019
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/coveragerc b/test/lib/ansible_test/_data/coveragerc
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/test/lib/ansible_test/_data/coveragerc
diff --git a/test/lib/ansible_test/_data/cryptography-constraints.txt b/test/lib/ansible_test/_data/cryptography-constraints.txt
new file mode 100644
index 00000000..8e3e99b4
--- /dev/null
+++ b/test/lib/ansible_test/_data/cryptography-constraints.txt
@@ -0,0 +1,3 @@
+# do not add a cryptography constraint here, see the get_cryptography_requirement function in executor.py for details
+idna < 2.8 ; python_version < '2.7' # idna 2.8+ requires python 2.7+
+cffi != 1.14.4 # Fails on systems with older gcc. Should be fixed in the next release. https://foss.heptapod.net/pypy/cffi/-/issues/480
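
The `; python_version < '2.7'` suffixes used here and throughout the requirement files below are PEP 508 environment markers, which pip evaluates against the running interpreter. A small sketch using the standalone packaging library (an assumption on my part; pip vendors its own copy internally):

from packaging.markers import Marker

marker = Marker("python_version < '2.7'")

print(marker.evaluate())                           # evaluated against the current interpreter
print(marker.evaluate({'python_version': '2.6'}))  # True - the pinned line applies
print(marker.evaluate({'python_version': '3.8'}))  # False - the line is skipped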
diff --git a/test/lib/ansible_test/_data/injector/ansible b/test/lib/ansible_test/_data/injector/ansible
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-config b/test/lib/ansible_test/_data/injector/ansible-config
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-config
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-connection b/test/lib/ansible_test/_data/injector/ansible-connection
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-connection
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-console b/test/lib/ansible_test/_data/injector/ansible-console
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-console
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-doc b/test/lib/ansible_test/_data/injector/ansible-doc
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-doc
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-galaxy b/test/lib/ansible_test/_data/injector/ansible-galaxy
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-galaxy
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-inventory b/test/lib/ansible_test/_data/injector/ansible-inventory
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-inventory
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-playbook b/test/lib/ansible_test/_data/injector/ansible-playbook
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-playbook
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-pull b/test/lib/ansible_test/_data/injector/ansible-pull
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-pull
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-test b/test/lib/ansible_test/_data/injector/ansible-test
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-test
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/ansible-vault b/test/lib/ansible_test/_data/injector/ansible-vault
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/ansible-vault
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/importer.py b/test/lib/ansible_test/_data/injector/importer.py
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/importer.py
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/pytest b/test/lib/ansible_test/_data/injector/pytest
new file mode 120000
index 00000000..6bbbfe4d
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/pytest
@@ -0,0 +1 @@
+python.py
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/injector/python.py b/test/lib/ansible_test/_data/injector/python.py
new file mode 100755
index 00000000..290b995c
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/python.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+"""Provides an entry point for python scripts and python modules on the controller with the current python interpreter and optional code coverage collection."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+    """Main entry point."""
+    name = os.path.basename(__file__)
+    args = [sys.executable]
+
+    coverage_config = os.environ.get('COVERAGE_CONF')
+    coverage_output = os.environ.get('COVERAGE_FILE')
+
+    if coverage_config:
+        if coverage_output:
+            args += ['-m', 'coverage.__main__', 'run', '--rcfile', coverage_config]
+        else:
+            if sys.version_info >= (3, 4):
+                # noinspection PyUnresolvedReferences
+                import importlib.util
+
+                # noinspection PyUnresolvedReferences
+                found = bool(importlib.util.find_spec('coverage'))
+            else:
+                # noinspection PyDeprecation
+                import imp
+
+                try:
+                    # noinspection PyDeprecation
+                    imp.find_module('coverage')
+                    found = True
+                except ImportError:
+                    found = False
+
+            if not found:
+                sys.exit('ERROR: Could not find `coverage` module. '
+                         'Did you use a virtualenv created without --system-site-packages or with the wrong interpreter?')
+
+    if name == 'python.py':
+        if len(sys.argv) > 1 and sys.argv[1] == '-c':
+            # prevent simple misuse of python.py with -c which does not work with coverage
+            sys.exit('ERROR: Use `python -c` instead of `python.py -c` to avoid errors when code coverage is collected.')
+    elif name == 'pytest':
+        args += ['-m', 'pytest']
+    else:
+        args += [find_executable(name)]
+
+    args += sys.argv[1:]
+
+    os.execv(args[0], args)
+
+
+def find_executable(name):
+    """
+    :type name: str
+    :rtype: str
+    """
+    path = os.environ.get('PATH', os.path.defpath)
+    seen = set([os.path.abspath(__file__)])
+
+    for base in path.split(os.path.pathsep):
+        candidate = os.path.abspath(os.path.join(base, name))
+
+        if candidate in seen:
+            continue
+
+        seen.add(candidate)
+
+        if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
+            return candidate
+
+    raise Exception('Executable "%s" not found in path: %s' % (name, path))
+
+
+if __name__ == '__main__':
+    main()
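
The net effect of main() above is an argv rewrite ahead of os.execv. A simplified, hypothetical re-creation of that rewrite (the paths here are made up for illustration):

def build_args(name, script_args, coverage_config=None, coverage_output=None,
               interpreter='/usr/bin/python', resolved='/usr/local/bin/ansible-playbook'):
    """Return the argv the injector would exec for the wrapped command."""
    args = [interpreter]
    if coverage_config and coverage_output:
        args += ['-m', 'coverage.__main__', 'run', '--rcfile', coverage_config]
    if name == 'pytest':
        args += ['-m', 'pytest']
    else:
        args += [resolved]  # stand-in for find_executable(name)
    return args + script_args

print(build_args('ansible-playbook', ['site.yml']))
# ['/usr/bin/python', '/usr/local/bin/ansible-playbook', 'site.yml']
print(build_args('pytest', ['-x'], coverage_config='cov.cfg', coverage_output='out'))
# ['/usr/bin/python', '-m', 'coverage.__main__', 'run', '--rcfile', 'cov.cfg', '-m', 'pytest', '-x']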
diff --git a/test/lib/ansible_test/_data/injector/virtualenv-isolated.sh b/test/lib/ansible_test/_data/injector/virtualenv-isolated.sh
new file mode 100644
index 00000000..af92a056
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/virtualenv-isolated.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Create and activate a fresh virtual environment with `source virtualenv-isolated.sh`.
+
+rm -rf "${OUTPUT_DIR}/venv"
+
+# Try to use 'venv' if it is available, then fall back to 'virtualenv' since some systems provide 'venv' even though it is non-functional.
+if [ -z "${ANSIBLE_TEST_PREFER_VENV:-}" ] || [[ "${ANSIBLE_TEST_PYTHON_VERSION}" =~ ^2\. ]] || ! "${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m venv "${OUTPUT_DIR}/venv" > /dev/null 2>&1; then
+    rm -rf "${OUTPUT_DIR}/venv"
+    "${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m virtualenv --python "${ANSIBLE_TEST_PYTHON_INTERPRETER}" "${OUTPUT_DIR}/venv"
+fi
+
+set +ux
+source "${OUTPUT_DIR}/venv/bin/activate"
+set -ux
+
+if [[ "${ANSIBLE_TEST_COVERAGE}" ]]; then
+    pip install coverage -c ../../../runner/requirements/constraints.txt --disable-pip-version-check
+fi
diff --git a/test/lib/ansible_test/_data/injector/virtualenv.sh b/test/lib/ansible_test/_data/injector/virtualenv.sh
new file mode 100644
index 00000000..282e6074
--- /dev/null
+++ b/test/lib/ansible_test/_data/injector/virtualenv.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+# Create and activate a fresh virtual environment with `source virtualenv.sh`.
+
+rm -rf "${OUTPUT_DIR}/venv"
+
+# Try to use 'venv' if it is available, then fall back to 'virtualenv' since some systems provide 'venv' even though it is non-functional.
+if [ -z "${ANSIBLE_TEST_PREFER_VENV:-}" ] || [[ "${ANSIBLE_TEST_PYTHON_VERSION}" =~ ^2\. ]] || ! "${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m venv --system-site-packages "${OUTPUT_DIR}/venv" > /dev/null 2>&1; then
+    rm -rf "${OUTPUT_DIR}/venv"
+    "${ANSIBLE_TEST_PYTHON_INTERPRETER}" -m virtualenv --system-site-packages --python "${ANSIBLE_TEST_PYTHON_INTERPRETER}" "${OUTPUT_DIR}/venv"
+fi
+fi
+
+set +ux
+source "${OUTPUT_DIR}/venv/bin/activate"
+set -ux
diff --git a/test/lib/ansible_test/_data/inventory b/test/lib/ansible_test/_data/inventory
new file mode 100644
index 00000000..1b77a7ea
--- /dev/null
+++ b/test/lib/ansible_test/_data/inventory
@@ -0,0 +1,6 @@
+# Do not put test specific entries in this inventory file.
+# For script based test targets (using runme.sh) put the inventory file in the test's directory instead.
+
+[testgroup]
+# ansible_python_interpreter must be set to avoid interpreter discovery
+testhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"
diff --git a/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml b/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml
new file mode 100644
index 00000000..2e5ff9c6
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/windows_coverage_setup.yml
@@ -0,0 +1,19 @@
+---
+- name: setup global coverage directory for Windows test targets
+  hosts: windows
+  gather_facts: no
+  tasks:
+  - name: create temp directory
+    ansible.windows.win_file:
+      path: '{{ remote_temp_path }}'
+      state: directory
+
+  - name: allow everyone to write to coverage test dir
+    ansible.windows.win_acl:
+      path: '{{ remote_temp_path }}'
+      user: Everyone
+      rights: Modify
+      inherit: ContainerInherit, ObjectInherit
+      propagation: 'None'
+      type: allow
+      state: present
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml b/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml
new file mode 100644
index 00000000..ab34dc27
--- /dev/null
+++ b/test/lib/ansible_test/_data/playbooks/windows_coverage_teardown.yml
@@ -0,0 +1,77 @@
+---
+- name: collect the coverage files from the Windows host
+  hosts: windows
+  gather_facts: no
+  tasks:
+  - name: make sure all vars have been set
+    assert:
+      that:
+      - local_temp_path is defined
+      - remote_temp_path is defined
+
+  - name: zip up all coverage files in the remote temp path
+    ansible.windows.win_shell: |
+      $coverage_dir = '{{ remote_temp_path }}'
+      $zip_file = Join-Path -Path $coverage_dir -ChildPath 'coverage.zip'
+      if (Test-Path -LiteralPath $zip_file) {
+          Remove-Item -LiteralPath $zip_file -Force
+      }
+
+      $coverage_files = Get-ChildItem -LiteralPath $coverage_dir -Include '*=coverage*' -File
+
+      $legacy = $false
+      try {
+          # Requires .NET 4.5+ which isn't present on older Windows versions. Remove once 2008/R2 is EOL.
+          # We also can't use the Shell.Application as it will fail on GUI-less servers (Server Core).
+          Add-Type -AssemblyName System.IO.Compression -ErrorAction Stop > $null
+      } catch {
+          $legacy = $true
+      }
+
+      if ($legacy) {
+          New-Item -Path $zip_file -ItemType File > $null
+          $shell = New-Object -ComObject Shell.Application
+          $zip = $shell.Namespace($zip_file)
+          foreach ($file in $coverage_files) {
+              $zip.CopyHere($file.FullName)
+          }
+      } else {
+          $fs = New-Object -TypeName System.IO.FileStream -ArgumentList $zip_file, 'CreateNew'
+          try {
+              $archive = New-Object -TypeName System.IO.Compression.ZipArchive -ArgumentList @(
+                  $fs,
+                  [System.IO.Compression.ZipArchiveMode]::Create
+              )
+              try {
+                  foreach ($file in $coverage_files) {
+                      $archive_entry = $archive.CreateEntry($file.Name, 'Optimal')
+                      $entry_fs = $archive_entry.Open()
+                      try {
+                          $file_fs = [System.IO.File]::OpenRead($file.FullName)
+                          try {
+                              $file_fs.CopyTo($entry_fs)
+                          } finally {
+                              $file_fs.Dispose()
+                          }
+                      } finally {
+                          $entry_fs.Dispose()
+                      }
+                  }
+              } finally {
+                  $archive.Dispose()
+              }
+          } finally {
+              $fs.Dispose()
+          }
+      }
+
+  - name: fetch coverage zip file to localhost
+    fetch:
+      src: '{{ remote_temp_path }}\coverage.zip'
+      dest: '{{ local_temp_path }}/coverage-{{ inventory_hostname }}.zip'
+      flat: yes
+
+  - name: remove the temporary coverage directory
+    ansible.windows.win_file:
+      path: '{{ remote_temp_path }}'
+      state: absent
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/pytest.ini b/test/lib/ansible_test/_data/pytest.ini
new file mode 100644
index 00000000..2ac56423
--- /dev/null
+++ b/test/lib/ansible_test/_data/pytest.ini
@@ -0,0 +1,9 @@
+[pytest]
+xfail_strict = true
+mock_use_standalone_module = true
+# It was decided to stick with "legacy" (aka "xunit1") for now.
+# Currently used pytest versions all support xunit2 format too.
+# Except the one used under Python 2.6 — it doesn't process this option
+# at all. Ref:
+# https://github.com/ansible/ansible/pull/66445#discussion_r372530176
+junit_family = xunit1
diff --git a/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py
new file mode 100644
index 00000000..67c69f15
--- /dev/null
+++ b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py
@@ -0,0 +1,67 @@
+"""Enable unit testing of Ansible collections. PYTEST_DONT_REWRITE"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+# set by ansible-test to a single directory, rather than a list of directories as supported by Ansible itself
+ANSIBLE_COLLECTIONS_PATH = os.path.join(os.environ['ANSIBLE_COLLECTIONS_PATH'], 'ansible_collections')
+
+
+# this monkeypatch to _pytest.pathlib.resolve_package_path fixes PEP420 resolution for collections in pytest >= 6.0.0
+# NB: this code should never run under py2
+def collection_resolve_package_path(path):
+    """Configure the Python package path so that pytest can find our collections."""
+    for parent in path.parents:
+        if str(parent) == ANSIBLE_COLLECTIONS_PATH:
+            return parent
+
+    raise Exception('File "%s" not found in collection path "%s".' % (path, ANSIBLE_COLLECTIONS_PATH))
+
+
+# this monkeypatch to py.path.local.LocalPath.pypkgpath fixes PEP420 resolution for collections in pytest < 6.0.0
+def collection_pypkgpath(self):
+    """Configure the Python package path so that pytest can find our collections."""
+    for parent in self.parts(reverse=True):
+        if str(parent) == ANSIBLE_COLLECTIONS_PATH:
+            return parent
+
+    raise Exception('File "%s" not found in collection path "%s".' % (self.strpath, ANSIBLE_COLLECTIONS_PATH))
+
+
+def pytest_configure():
+    """Configure this pytest plugin."""
+    try:
+        if pytest_configure.executed:
+            return
+    except AttributeError:
+        pytest_configure.executed = True
+
+    # noinspection PyProtectedMember
+    from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
+
+    # allow unit tests to import code from collections
+
+    # noinspection PyProtectedMember
+    _AnsibleCollectionFinder(paths=[os.path.dirname(ANSIBLE_COLLECTIONS_PATH)])._install()  # pylint: disable=protected-access
+
+    try:
+        # noinspection PyProtectedMember
+        from _pytest import pathlib as _pytest_pathlib
+    except ImportError:
+        _pytest_pathlib = None
+
+    if hasattr(_pytest_pathlib, 'resolve_package_path'):
+        _pytest_pathlib.resolve_package_path = collection_resolve_package_path
+    else:
+        # looks like pytest < 6.0.0, use the old hack against py.path
+        # noinspection PyProtectedMember
+        import py._path.local
+
+        # force collections unit tests to be loaded with the ansible_collections namespace
+        # original idea from https://stackoverflow.com/questions/50174130/how-do-i-pytest-a-project-using-pep-420-namespace-packages/50175552#50175552
+        # noinspection PyProtectedMember
+        py._path.local.LocalPath.pypkgpath = collection_pypkgpath  # pylint: disable=protected-access
+
+pytest_configure()
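
The monkeypatched resolver simply walks a test file's parents until it reaches the collections root, so pytest treats everything below ansible_collections as one PEP 420 namespace package. A standalone sketch of that walk (the paths are made up):

from pathlib import PurePosixPath

# Made-up collections root; the plugin derives the real one from ANSIBLE_COLLECTIONS_PATH.
COLLECTIONS_ROOT = PurePosixPath('/tmp/collections/ansible_collections')

def resolve_package_root(path):
    """Walk the parents of path until the collections root is found, as the plugin does."""
    for parent in path.parents:
        if parent == COLLECTIONS_ROOT:
            return parent
    raise Exception('File "%s" not found in collection path "%s".' % (path, COLLECTIONS_ROOT))

test_file = PurePosixPath('/tmp/collections/ansible_collections/ns/col/tests/unit/test_x.py')
print(resolve_package_root(test_file))  # /tmp/collections/ansible_collections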
diff --git a/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py
new file mode 100644
index 00000000..b05298ab
--- /dev/null
+++ b/test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_coverage.py
@@ -0,0 +1,68 @@
+"""Monkey patch os._exit when running under coverage so we don't lose coverage data in forks, such as with `pytest --boxed`. PYTEST_DONT_REWRITE"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def pytest_configure():
+    """Configure this pytest plugin."""
+    try:
+        if pytest_configure.executed:
+            return
+    except AttributeError:
+        pytest_configure.executed = True
+
+    try:
+        import coverage
+    except ImportError:
+        coverage = None
+
+    try:
+        coverage.Coverage
+    except AttributeError:
+        coverage = None
+
+    if not coverage:
+        return
+
+    import gc
+    import os
+
+    coverage_instances = []
+
+    for obj in gc.get_objects():
+        if isinstance(obj, coverage.Coverage):
+            coverage_instances.append(obj)
+
+    if not coverage_instances:
+        coverage_config = os.environ.get('COVERAGE_CONF')
+
+        if not coverage_config:
+            return
+
+        coverage_output = os.environ.get('COVERAGE_FILE')
+
+        if not coverage_output:
+            return
+
+        cov = coverage.Coverage(config_file=coverage_config)
+        coverage_instances.append(cov)
+    else:
+        cov = None
+
+    # noinspection PyProtectedMember
+    os_exit = os._exit  # pylint: disable=protected-access
+
+    def coverage_exit(*args, **kwargs):
+        for instance in coverage_instances:
+            instance.stop()
+            instance.save()
+
+        os_exit(*args, **kwargs)
+
+    os._exit = coverage_exit  # pylint: disable=protected-access
+
+    if cov:
+        cov.start()
+
+
+pytest_configure()
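
The reason for wrapping os._exit can be shown in a few lines: os._exit() skips atexit handlers and normal interpreter shutdown, so a forked child loses any unsaved state. A minimal POSIX-only demonstration (mine, not part of the plugin):

import atexit
import os
import sys

atexit.register(lambda: print('atexit ran in pid %d' % os.getpid()))

pid = os.fork()  # POSIX only, mirroring what `pytest --boxed` does

if pid == 0:
    # Child: os._exit() bypasses atexit, so anything (like coverage data) not
    # explicitly saved first is lost; the plugin's coverage_exit() wrapper
    # performs that save before calling the real os._exit().
    os._exit(0)

os.waitpid(pid, 0)
sys.exit(0)  # parent exits normally; its atexit handler runs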
diff --git a/test/lib/ansible_test/_data/quiet_pip.py b/test/lib/ansible_test/_data/quiet_pip.py
new file mode 100644
index 00000000..7d2a6d16
--- /dev/null
+++ b/test/lib/ansible_test/_data/quiet_pip.py
@@ -0,0 +1,70 @@
+"""Custom entry-point for pip that filters out unwanted logging and warnings."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import re
+import runpy
+import warnings
+
+BUILTIN_FILTERER_FILTER = logging.Filterer.filter
+
+LOGGING_MESSAGE_FILTER = re.compile("^("
+                                    ".*Running pip install with root privileges is generally not a good idea.*|"  # custom Fedora patch [1]
+                                    "DEPRECATION: Python 2.7 will reach the end of its life .*|"  # pip 19.2.3
+                                    "Ignoring .*: markers .* don't match your environment|"
+                                    "Requirement already satisfied.*"
+                                    ")$")
+
+# [1] https://src.fedoraproject.org/rpms/python-pip/blob/master/f/emit-a-warning-when-running-with-root-privileges.patch
+
+WARNING_MESSAGE_FILTERS = (
+    # DEPRECATION: Python 2.6 is no longer supported by the Python core team, please upgrade your Python.
+    # A future version of pip will drop support for Python 2.6
+    'Python 2.6 is no longer supported by the Python core team, ',
+
+    # {path}/python2.6/lib/python2.6/site-packages/pip/_vendor/urllib3/util/ssl_.py:137: InsecurePlatformWarning:
+    # A true SSLContext object is not available. This prevents urllib3 from configuring SSL appropriately and may cause certain SSL connections to fail.
+    # You can upgrade to a newer version of Python to solve this.
+    # For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
+    'A true SSLContext object is not available. ',
+
+    # {path}/python2.6/lib/python2.6/site-packages/pip/_vendor/urllib3/util/ssl_.py:339: SNIMissingWarning:
+    # An HTTPS request has been made, but the SNI (Subject Name Indication) extension to TLS is not available on this platform.
+    # This may cause the server to present an incorrect TLS certificate, which can cause validation failures.
+    # You can upgrade to a newer version of Python to solve this.
+    # For more information, see https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
+    'An HTTPS request has been made, but the SNI ',
+
+    # DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained.
+    # pip 21.0 will drop support for Python 2.7 in January 2021.
+    # More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support
+    'DEPRECATION: Python 2.7 reached the end of its life ',
+)
+
+
+def custom_filterer_filter(self, record):
+    """Globally omit logging of unwanted messages."""
+    if LOGGING_MESSAGE_FILTER.search(record.getMessage()):
+        return 0
+
+    return BUILTIN_FILTERER_FILTER(self, record)
+
+
+def main():
+    """Main program entry point."""
+    # Filtering logging output globally avoids having to intercept stdout/stderr.
+    # It also avoids problems with loss of color output and mixing up the order of stdout/stderr messages.
+    logging.Filterer.filter = custom_filterer_filter
+
+    for message_filter in WARNING_MESSAGE_FILTERS:
+        # Setting filterwarnings in code is necessary because of the following:
+        # Python 2.6 does not support the PYTHONWARNINGS environment variable. It does support the -W option.
+        # Python 2.7 cannot use the -W option to match warning text after a colon. This makes it impossible to match specific warning messages.
+        warnings.filterwarnings('ignore', message_filter)
+
+    runpy.run_module('pip.__main__', run_name='__main__', alter_sys=True)
+
+
+if __name__ == '__main__':
+    main()
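
Because logging.Filterer is the common base of both Logger and Handler, replacing its filter method silences matching records everywhere at once. A minimal, standalone reproduction of the trick (my own sketch, not part of quiet_pip.py):

import logging
import re

UNWANTED = re.compile('^Requirement already satisfied')
BUILTIN_FILTER = logging.Filterer.filter

def quiet_filter(self, record):
    """Drop matching records; defer to the built-in filter for the rest."""
    if UNWANTED.search(record.getMessage()):
        return 0
    return BUILTIN_FILTER(self, record)

logging.Filterer.filter = quiet_filter
logging.basicConfig(level=logging.INFO, format='%(message)s')

logging.info('Requirement already satisfied: pip')   # silently dropped
logging.info('Installing collected packages: demo')  # still printed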
diff --git a/test/lib/ansible_test/_data/requirements/ansible-test.txt b/test/lib/ansible_test/_data/requirements/ansible-test.txt
new file mode 100644
index 00000000..7b596e1b
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/ansible-test.txt
@@ -0,0 +1,6 @@
+argparse ; python_version < '2.7'
+
+# pip 7.1 added support for constraints, which are required by ansible-test to install most python requirements
+# see https://github.com/pypa/pip/blame/e648e00dc0226ade30ade99591b245b0c98e86c9/NEWS.rst#L1258
+pip >= 7.1, < 10 ; python_version < '2.7' # pip 10+ drops support for python 2.6 (sanity_ok)
+pip >= 7.1 ; python_version >= '2.7' # sanity_ok
diff --git a/test/lib/ansible_test/_data/requirements/constraints.txt b/test/lib/ansible_test/_data/requirements/constraints.txt
new file mode 100644
index 00000000..81ee480c
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/constraints.txt
@@ -0,0 +1,64 @@
+coverage >= 4.5.1, < 5.0.0 ; python_version < '3.7' # coverage 4.4 required for "disable_warnings" support but 4.5.1 needed for bug fixes, coverage 5.0+ incompatible
+coverage >= 4.5.2, < 5.0.0 ; python_version == '3.7' # coverage 4.5.2 fixes bugs in support for python 3.7, coverage 5.0+ incompatible
+coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible
+cryptography < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6
+# do not add a cryptography constraint here unless it is for python version incompatibility, see the get_cryptography_requirement function in executor.py for details
+deepdiff < 4.0.0 ; python_version < '3' # deepdiff 4.0.0 and later require python 3
+jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later
+urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later
+pywinrm >= 0.3.0 # message encryption support
+sphinx < 1.6 ; python_version < '2.7' # sphinx 1.6 and later require python 2.7 or later
+sphinx < 1.8 ; python_version >= '2.7' # sphinx 1.8 and later are currently incompatible with rstcheck 3.3
+pygments >= 2.4.0 # Pygments 2.4.0 includes bugfixes for YAML and YAML+Jinja lexers
+wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later
+yamllint != 1.8.0, < 1.14.0 ; python_version < '2.7' # yamllint 1.8.0 and 1.14.0+ require python 2.7+
+pycrypto >= 2.6 # Need features found in 2.6 and greater
+ncclient >= 0.5.2 # Need features added in 0.5.2 and greater
+idna < 2.6, >= 2.5 # linode requires idna < 2.9, >= 2.5, requests requires idna < 2.6, but cryptography will cause the latest version to be installed instead
+paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6
+pytest < 3.3.0 ; python_version < '2.7' # pytest 3.3.0 drops support for python 2.6
+pytest < 5.0.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7
+pytest-forked < 1.0.2 ; python_version < '2.7' # pytest-forked 1.0.2 and later require python 2.7 or later
+pytest-forked >= 1.0.2 ; python_version >= '2.7' # pytest-forked before 1.0.2 does not work with pytest 4.2.0+ (which requires python 2.7+)
+ntlm-auth >= 1.3.0 # message encryption support using cryptography
+requests < 2.20.0 ; python_version < '2.7' # requests 2.20.0 drops support for python 2.6
+requests-ntlm >= 1.1.0 # message encryption support
+requests-credssp >= 0.1.0 # message encryption support
+voluptuous >= 0.11.0 # Schema recursion via Self
+openshift >= 0.6.2, < 0.9.0 # merge_type support
+virtualenv < 16.0.0 ; python_version < '2.7' # virtualenv 16.0.0 and later require python 2.7 or later
+pathspec < 0.6.0 ; python_version < '2.7' # pathspec 0.6.0 and later require python 2.7 or later
+pyopenssl < 18.0.0 ; python_version < '2.7' # pyOpenSSL 18.0.0 and later require python 2.7 or later
+pyparsing < 3.0.0 ; python_version < '3.5' # pyparsing 3 and later require python 3.5 or later
+pyfmg == 0.6.1 # newer versions do not pass current unit tests
+pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later
+pycparser < 2.19 ; python_version < '2.7' # pycparser 2.19 and later require python 2.7 or later
+mock >= 2.0.0 # needed for features backported from Python 3.6 unittest.mock (assert_called, assert_called_once...)
+pytest-mock >= 1.4.0 # needed for mock_use_standalone_module pytest option
+xmltodict < 0.12.0 ; python_version < '2.7' # xmltodict 0.12.0 and later require python 2.7 or later
+lxml < 4.3.0 ; python_version < '2.7' # lxml 4.3.0 and later require python 2.7 or later
+pyvmomi < 6.0.0 ; python_version < '2.7' # pyvmomi 6.0.0 and later require python 2.7 or later
+pyone == 1.1.9 # newer versions do not pass current integration tests
+boto3 < 1.11 ; python_version < '2.7' # boto3 1.11 drops Python 2.6 support
+botocore >= 1.10.0, < 1.14 ; python_version < '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca; botocore 1.14 drops Python 2.6 support
+botocore >= 1.10.0 ; python_version >= '2.7' # adds support for the following AWS services: secretsmanager, fms, and acm-pca
+setuptools < 37 ; python_version == '2.6' # setuptools 37 and later require python 2.7 or later
+setuptools < 45 ; python_version == '2.7' # setuptools 45 and later require python 3.5 or later
+
+# freeze antsibull-changelog for consistent test results
+antsibull-changelog == 0.9.0
+
+# Make sure we have a new enough antsibull for the CLI args we use
+antsibull >= 0.21.0
+
+# freeze pylint and its requirements for consistent test results
+astroid == 2.3.3
+isort == 4.3.15
+lazy-object-proxy == 1.4.3
+mccabe == 0.6.1
+pylint == 2.3.1
+typed-ast == 1.4.1
+wrapt == 1.11.1
+
+# freeze pycodestyle for consistent test results
+pycodestyle == 2.6.0
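
Constraint files like this one only pin versions; they never cause a package to be installed on their own. A hypothetical invocation (file names are illustrative) showing how ansible-test style constraints combine with an install request:

import subprocess
import sys

# Installs coverage at whatever version constraints.txt permits; packages that
# appear only in the constraints file are not installed at all.
subprocess.check_call([
    sys.executable, '-m', 'pip', 'install',
    'coverage',
    '-c', 'constraints.txt',
    '--disable-pip-version-check',
])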
diff --git a/test/lib/ansible_test/_data/requirements/coverage.txt b/test/lib/ansible_test/_data/requirements/coverage.txt
new file mode 100644
index 00000000..4ebc8aea
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/coverage.txt
@@ -0,0 +1 @@
+coverage
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt
new file mode 100644
index 00000000..aa2f71cc
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.aws.txt
@@ -0,0 +1,3 @@
+boto
+boto3
+botocore
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt
new file mode 100644
index 00000000..6df1a4e8
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.azure.txt
@@ -0,0 +1,39 @@
+packaging
+requests[security]
+xmltodict
+azure-cli-core==2.0.35
+azure-cli-nspkg==3.0.2
+azure-common==1.1.11
+azure-mgmt-authorization==0.51.1
+azure-mgmt-batch==5.0.1
+azure-mgmt-cdn==3.0.0
+azure-mgmt-compute==10.0.0
+azure-mgmt-containerinstance==1.4.0
+azure-mgmt-containerregistry==2.0.0
+azure-mgmt-containerservice==4.4.0
+azure-mgmt-dns==2.1.0
+azure-mgmt-keyvault==1.1.0
+azure-mgmt-marketplaceordering==0.1.0
+azure-mgmt-monitor==0.5.2
+azure-mgmt-network==4.0.0
+azure-mgmt-nspkg==2.0.0
+azure-mgmt-redis==5.0.0
+azure-mgmt-resource==2.1.0
+azure-mgmt-rdbms==1.4.1
+azure-mgmt-servicebus==0.5.3
+azure-mgmt-sql==0.10.0
+azure-mgmt-storage==3.1.0
+azure-mgmt-trafficmanager==0.50.0
+azure-mgmt-web==0.41.0
+azure-nspkg==2.0.0
+azure-storage==0.35.1
+msrest==0.6.10
+msrestazure==0.6.2
+azure-keyvault==1.0.0a1
+azure-graphrbac==0.40.0
+azure-mgmt-cosmosdb==0.5.2
+azure-mgmt-hdinsight==0.1.0
+azure-mgmt-devtestlabs==3.0.0
+azure-mgmt-loganalytics==0.2.0
+azure-mgmt-automation==0.1.1
+azure-mgmt-iothub==0.7.0
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt
new file mode 100644
index 00000000..f0a89b91
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.cs.txt
@@ -0,0 +1,2 @@
+cs
+sshpubkeys
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt
new file mode 100644
index 00000000..a6580e69
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.hcloud.txt
@@ -0,0 +1 @@
+hcloud>=1.6.0 ; python_version >= '2.7' and python_version < '3.9' # Python 2.6 is not supported (sanity_ok); Only hcloud >= 1.6.0 supports Floating IPs with names; Python 3.9 and later are not supported
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt
new file mode 100644
index 00000000..be611454
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.nios.txt
@@ -0,0 +1 @@
+infoblox-client
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt
new file mode 100644
index 00000000..acd34668
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.opennebula.txt
@@ -0,0 +1 @@
+pyone
\ No newline at end of file
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt
new file mode 100644
index 00000000..269bf090
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.openshift.txt
@@ -0,0 +1 @@
+openshift
diff --git a/test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt b/test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt
new file mode 100644
index 00000000..fd8f1398
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.cloud.vcenter.txt
@@ -0,0 +1,2 @@
+pyvmomi
+git+https://github.com/vmware/vsphere-automation-sdk-python.git ; python_version >= '2.7' # Python 2.6 is not supported
diff --git a/test/lib/ansible_test/_data/requirements/integration.txt b/test/lib/ansible_test/_data/requirements/integration.txt
new file mode 100644
index 00000000..2c562615
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/integration.txt
@@ -0,0 +1,6 @@
+cryptography
+jinja2
+junit-xml
+ordereddict ; python_version < '2.7'
+packaging
+pyyaml
diff --git a/test/lib/ansible_test/_data/requirements/network-integration.txt b/test/lib/ansible_test/_data/requirements/network-integration.txt
new file mode 100644
index 00000000..726d2943
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/network-integration.txt
@@ -0,0 +1,7 @@
+cryptography
+jinja2
+junit-xml
+ordereddict ; python_version < '2.7' # ansible-test junit callback plugin requirement
+packaging
+paramiko
+pyyaml
diff --git a/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt b/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt
new file mode 100644
index 00000000..abd6c5fd
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.ansible-doc.txt
@@ -0,0 +1,2 @@
+jinja2 # ansible-base requirement
+pyyaml # ansible-base requirement
diff --git a/test/lib/ansible_test/_data/requirements/sanity.changelog.txt b/test/lib/ansible_test/_data/requirements/sanity.changelog.txt
new file mode 100644
index 00000000..8a98acc9
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.changelog.txt
@@ -0,0 +1,2 @@
+# changelog build requires python 3.6+
+antsibull-changelog ; python_version >= '3.6'
diff --git a/test/lib/ansible_test/_data/requirements/sanity.import.txt b/test/lib/ansible_test/_data/requirements/sanity.import.txt
new file mode 100644
index 00000000..17e375ce
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.import.txt
@@ -0,0 +1,2 @@
+pyyaml # required for the collection loader to parse yaml for plugin routing
+virtualenv ; python_version <= '2.7' # virtualenv required on Python 2.x, but on Python 3.x we can use the built-in venv instead
diff --git a/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt b/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt
new file mode 100644
index 00000000..c3726e8b
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.integration-aliases.txt
@@ -0,0 +1 @@
+pyyaml
diff --git a/test/lib/ansible_test/_data/requirements/sanity.pep8.txt b/test/lib/ansible_test/_data/requirements/sanity.pep8.txt
new file mode 100644
index 00000000..282a93fb
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.pep8.txt
@@ -0,0 +1 @@
+pycodestyle
diff --git a/test/lib/ansible_test/_data/requirements/sanity.ps1 b/test/lib/ansible_test/_data/requirements/sanity.ps1
new file mode 100755
index 00000000..1ea1f8e5
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.ps1
@@ -0,0 +1,45 @@
+#!/usr/bin/env pwsh
+param (
+ [Switch]
+ $IsContainer
+)
+
+#Requires -Version 6
+
+Set-StrictMode -Version 2.0
+$ErrorActionPreference = "Stop"
+$ProgressPreference = 'SilentlyContinue'
+
+Function Install-PSModule {
+ [CmdletBinding()]
+ param(
+ [Parameter(Mandatory=$true)]
+ [String]
+ $Name,
+
+ [Parameter(Mandatory=$true)]
+ [Version]
+ $RequiredVersion
+ )
+
+    # In case PSGallery is down, we first check whether the module is already installed.
+ $installedModule = Get-Module -Name $Name -ListAvailable | Where-Object Version -eq $RequiredVersion
+ if (-not $installedModule) {
+ Install-Module -Name $Name -RequiredVersion $RequiredVersion -Scope CurrentUser
+ }
+}
+
+Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
+Install-PSModule -Name PSScriptAnalyzer -RequiredVersion 1.18.0
+
+if ($IsContainer) {
+    # PSScriptAnalyzer contains lots of json files for the UseCompatibleCommands check. We don't use this rule, so by
+    # removing the contents we can save 200MB in the docker image (or more in the future).
+ # https://github.com/PowerShell/PSScriptAnalyzer/blob/master/RuleDocumentation/UseCompatibleCommands.md
+ $pssaPath = (Get-Module -ListAvailable -Name PSScriptAnalyzer).ModuleBase
+ $compatPath = Join-Path -Path $pssaPath -ChildPath compatibility_profiles -AdditionalChildPath '*'
+ Remove-Item -Path $compatPath -Recurse -Force
+}
+
+# Install the PSCustomUseLiteralPath rule
+Install-PSModule -Name PSSA-PSCustomUseLiteralPath -RequiredVersion 0.1.1
diff --git a/test/lib/ansible_test/_data/requirements/sanity.pylint.txt b/test/lib/ansible_test/_data/requirements/sanity.pylint.txt
new file mode 100644
index 00000000..438ca51d
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.pylint.txt
@@ -0,0 +1,3 @@
+pylint
+pyyaml # needed for collection_detail.py
+mccabe # pylint complexity testing
diff --git a/test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt b/test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt
new file mode 100644
index 00000000..3a5eeed1
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.rstcheck.txt
@@ -0,0 +1 @@
+rstcheck
diff --git a/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt b/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt
new file mode 100644
index 00000000..edd96991
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.runtime-metadata.txt
@@ -0,0 +1,2 @@
+pyyaml
+voluptuous
diff --git a/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt b/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt
new file mode 100644
index 00000000..5c0fca78
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.validate-modules.txt
@@ -0,0 +1,3 @@
+jinja2 # ansible-base requirement
+pyyaml # needed for collection_detail.py
+voluptuous
diff --git a/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt b/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt
new file mode 100644
index 00000000..b2c729ca
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/sanity.yamllint.txt
@@ -0,0 +1 @@
+yamllint
diff --git a/test/lib/ansible_test/_data/requirements/units.txt b/test/lib/ansible_test/_data/requirements/units.txt
new file mode 100644
index 00000000..307d7c35
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/units.txt
@@ -0,0 +1,7 @@
+cryptography
+jinja2
+mock
+pytest
+pytest-mock
+pytest-xdist
+pyyaml
diff --git a/test/lib/ansible_test/_data/requirements/windows-integration.txt b/test/lib/ansible_test/_data/requirements/windows-integration.txt
new file mode 100644
index 00000000..86de35ee
--- /dev/null
+++ b/test/lib/ansible_test/_data/requirements/windows-integration.txt
@@ -0,0 +1,11 @@
+cryptography
+jinja2
+junit-xml
+ntlm-auth
+ordereddict ; python_version < '2.7' # ansible-test junit callback plugin requirement
+requests-ntlm
+requests-credssp
+packaging
+pypsrp
+pywinrm[credssp]
+pyyaml
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json
new file mode 100644
index 00000000..12bbe0d1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.json
@@ -0,0 +1,13 @@
+{
+ "all_targets": true,
+ "prefixes": [
+ "lib/ansible/modules/",
+ "lib/ansible/plugins/action/",
+ "plugins/modules/",
+ "plugins/action/"
+ ],
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py
new file mode 100755
index 00000000..65142e00
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/action-plugin-docs.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+"""Test to verify action plugins have an associated module to provide documentation."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ module_names = set()
+
+ module_prefixes = {
+ 'lib/ansible/modules/': True,
+ 'plugins/modules/': False,
+ }
+
+ action_prefixes = {
+ 'lib/ansible/plugins/action/': True,
+ 'plugins/action/': False,
+ }
+
+ for path in paths:
+ full_name = get_full_name(path, module_prefixes)
+
+ if full_name:
+ module_names.add(full_name)
+
+ for path in paths:
+ full_name = get_full_name(path, action_prefixes)
+
+ if full_name and full_name not in module_names:
+ print('%s: action plugin has no matching module to provide documentation' % path)
+
+
+def get_full_name(path, prefixes):
+ """Return the full name of the plugin at the given path by matching against the given path prefixes, or None if no match is found."""
+ for prefix, flat in prefixes.items():
+ if path.startswith(prefix):
+ relative_path = os.path.relpath(path, prefix)
+
+ if flat:
+ full_name = os.path.basename(relative_path)
+ else:
+ full_name = relative_path
+
+ full_name = os.path.splitext(full_name)[0]
+
+ name = os.path.basename(full_name)
+
+ if name == '__init__':
+ return None
+
+ if name.startswith('_'):
+ name = name[1:]
+
+ full_name = os.path.join(os.path.dirname(full_name), name).replace(os.path.sep, '.')
+
+ return full_name
+
+ return None
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/changelog.json b/test/lib/ansible_test/_data/sanity/code-smell/changelog.json
new file mode 100644
index 00000000..87f223b1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/changelog.json
@@ -0,0 +1,9 @@
+{
+ "intercept": true,
+ "minimum_python_version": "3.6",
+ "prefixes": [
+ "changelogs/config.yaml",
+ "changelogs/fragments/"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/changelog.py b/test/lib/ansible_test/_data/sanity/code-smell/changelog.py
new file mode 100755
index 00000000..710b10f6
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/changelog.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import subprocess
+
+
+def main():
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ allowed_extensions = ('.yml', '.yaml')
+ config_path = 'changelogs/config.yaml'
+
+    # the config must be detected independently of the file list, since the file list only contains files under test (changed)
+ has_config = os.path.exists(config_path)
+ paths_to_check = []
+ for path in paths:
+ if path == config_path:
+ continue
+
+ if path.startswith('changelogs/fragments/.'):
+ if path in ('changelogs/fragments/.keep', 'changelogs/fragments/.gitkeep'):
+ continue
+
+ print('%s:%d:%d: file must not be a dotfile' % (path, 0, 0))
+ continue
+
+ ext = os.path.splitext(path)[1]
+
+ if ext not in allowed_extensions:
+ print('%s:%d:%d: extension must be one of: %s' % (path, 0, 0, ', '.join(allowed_extensions)))
+
+ paths_to_check.append(path)
+
+ if not has_config:
+ print('changelogs/config.yaml:0:0: config file does not exist')
+ return
+
+ if not paths_to_check:
+ return
+
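+    # the resulting invocation is roughly (illustrative): python -m antsibull_changelog lint changelogs/fragments/some-change.yml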
+ cmd = [sys.executable, '-m', 'antsibull_changelog', 'lint'] + paths_to_check
+ subprocess.call(cmd) # ignore the return code, rely on the output instead
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/empty-init.json b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.json
new file mode 100644
index 00000000..9835f9b6
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.json
@@ -0,0 +1,14 @@
+{
+ "prefixes": [
+ "lib/ansible/modules/",
+ "lib/ansible/module_utils/",
+ "plugins/modules/",
+ "plugins/module_utils/",
+ "test/units/",
+ "tests/unit/"
+ ],
+ "files": [
+ "__init__.py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/empty-init.py b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.py
new file mode 100755
index 00000000..8bcd7f9e
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/empty-init.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ if os.path.getsize(path) > 0:
+ print('%s: empty __init__.py required' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json
new file mode 100644
index 00000000..6f1edb78
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.json
@@ -0,0 +1,6 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py
new file mode 100755
index 00000000..81081eed
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/future-import-boilerplate.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ lines = path_fd.read().splitlines()
+
+ missing = True
+ if not lines:
+ # Files are allowed to be empty of everything including boilerplate
+ missing = False
+
+ for text in lines:
+ if text in (b'from __future__ import (absolute_import, division, print_function)',
+ b'from __future__ import absolute_import, division, print_function'):
+ missing = False
+ break
+
+ if missing:
+ with open(path) as file:
+ contents = file.read()
+
+ # noinspection PyBroadException
+ try:
+ node = ast.parse(contents)
+
+ # files consisting of only assignments have no need for future import boilerplate
+ # the only exception would be division during assignment, but we'll overlook that for simplicity
+ # the most likely case is that of a documentation only python file
+ if all(isinstance(statement, ast.Assign) for statement in node.body):
+ missing = False
+ except Exception: # pylint: disable=broad-except
+ pass # the compile sanity test will report this error
+
+ if missing:
+ print('%s: missing: from __future__ import (absolute_import, division, print_function)' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/line-endings.json b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.json
new file mode 100644
index 00000000..db5c3c98
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.json
@@ -0,0 +1,4 @@
+{
+ "text": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/line-endings.py b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.py
new file mode 100755
index 00000000..1e4212d1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/line-endings.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ contents = path_fd.read()
+
+ if b'\r' in contents:
+ print('%s: use "\\n" for line endings instead of "\\r\\n"' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json
new file mode 100644
index 00000000..6f1edb78
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.json
@@ -0,0 +1,6 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py
new file mode 100755
index 00000000..28d06f36
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/metaclass-boilerplate.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ lines = path_fd.read().splitlines()
+
+ missing = True
+ if not lines:
+ # Files are allowed to be empty of everything including boilerplate
+ missing = False
+
+ for text in lines:
+ if text == b'__metaclass__ = type':
+ missing = False
+ break
+
+ if missing:
+ with open(path) as file:
+ contents = file.read()
+
+ # noinspection PyBroadException
+ try:
+ node = ast.parse(contents)
+
+ # files consisting of only assignments have no need for metaclass boilerplate
+ # the most likely case is that of a documentation only python file
+ if all(isinstance(statement, ast.Assign) for statement in node.body):
+ missing = False
+ except Exception: # pylint: disable=broad-except
+ pass # the compile sanity test will report this error
+
+ if missing:
+ print('%s: missing: __metaclass__ = type' % path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-assert.json b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.json
new file mode 100644
index 00000000..ccee80a2
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.json
@@ -0,0 +1,10 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "prefixes": [
+ "lib/ansible/",
+ "plugins/"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-assert.py b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.py
new file mode 100755
index 00000000..78561d96
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-assert.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+ASSERT_RE = re.compile(r'^\s*assert[^a-z0-9_:]')
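+# Illustrative: 'assert x == 1' matches, while identifiers and keys such as 'assert_called()' or 'assert:' do not,
+# thanks to the trailing character class.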
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as f:
+ for i, line in enumerate(f.readlines()):
+ matches = ASSERT_RE.findall(line)
+
+ if matches:
+ lineno = i + 1
+ colno = line.index('assert') + 1
+ print('%s:%d:%d: raise AssertionError instead of: %s' % (path, lineno, colno, matches[0][colno - 1:]))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py
new file mode 100755
index 00000000..a35650ef
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-basestring.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(isinstance.*basestring)', text)
+
+ if match:
+ print('%s:%d:%d: do not use `isinstance(s, basestring)`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py
new file mode 100755
index 00000000..e28b24f4
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iteritems.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
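+                # the negative lookbehind skips `six.iteritems(d)` when preceded by a space (illustrative), flagging only direct dict.iteritems() calls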
+ match = re.search(r'(?<! six)\.(iteritems)', text)
+
+ if match:
+ print('%s:%d:%d: use `dict.items` or `ansible.module_utils.six.iteritems` instead of `dict.iteritems`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.json b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.py b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.py
new file mode 100755
index 00000000..237ee5b1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-iterkeys.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'\.(iterkeys)', text)
+
+ if match:
+ print('%s:%d:%d: use `dict.keys` or `for key in dict:` instead of `dict.iterkeys`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.json b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.py b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.py
new file mode 100755
index 00000000..4bf92ea9
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-dict-itervalues.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(?<! six)\.(itervalues)', text)
+
+ if match:
+ print('%s:%d:%d: use `dict.values` or `ansible.module_utils.six.itervalues` instead of `dict.itervalues`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.json b/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.py b/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.py
new file mode 100755
index 00000000..c925f5b7
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-get-exception.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ basic_allow_once = True
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'([^a-zA-Z0-9_]get_exception[^a-zA-Z0-9_])', text)
+
+ if match:
+ if path == 'lib/ansible/module_utils/basic.py' and basic_allow_once:
+ # basic.py is allowed to import get_exception for backwards compatibility but should not call it anywhere
+ basic_allow_once = False
+ continue
+
+ print('%s:%d:%d: do not use `get_exception`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.json b/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.json
new file mode 100644
index 00000000..6f13c86b
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.json
@@ -0,0 +1,5 @@
+{
+ "include_directories": true,
+ "include_symlinks": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py b/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py
new file mode 100755
index 00000000..99432ea1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-illegal-filenames.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+
+# A script to check for illegal filenames on various operating systems. The
+# main rules are derived from the naming restrictions on Windows:
+# https://msdn.microsoft.com/en-us/library/aa365247#naming_conventions
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import struct
+import sys
+
+from ansible.module_utils.basic import to_bytes
+
+ILLEGAL_CHARS = [
+ b'<',
+ b'>',
+ b':',
+ b'"',
+ b'/',
+ b'\\',
+ b'|',
+ b'?',
+ b'*'
+] + [struct.pack("b", i) for i in range(32)]
+
+ILLEGAL_NAMES = [
+ "CON",
+ "PRN",
+ "AUX",
+ "NUL",
+ "COM1",
+ "COM2",
+ "COM3",
+ "COM4",
+ "COM5",
+ "COM6",
+ "COM7",
+ "COM8",
+ "COM9",
+ "LPT1",
+ "LPT2",
+ "LPT3",
+ "LPT4",
+ "LPT5",
+ "LPT6",
+ "LPT7",
+ "LPT8",
+ "LPT9",
+]
+
+ILLEGAL_END_CHARS = [
+ '.',
+ ' ',
+]
+
+
+def check_path(path, is_dir=False):
+ type_name = 'directory' if is_dir else 'file'
+ file_name = os.path.basename(path.rstrip(os.path.sep))
+ name = os.path.splitext(file_name)[0]
+
+ if name.upper() in ILLEGAL_NAMES:
+ print("%s: illegal %s name %s" % (path, type_name, name.upper()))
+
+ if file_name[-1] in ILLEGAL_END_CHARS:
+ print("%s: illegal %s name end-char '%s'" % (path, type_name, file_name[-1]))
+
+ bfile = to_bytes(file_name, encoding='utf-8')
+ for char in ILLEGAL_CHARS:
+ if char in bfile:
+ bpath = to_bytes(path, encoding='utf-8')
+ print("%s: illegal char '%s' in %s name" % (bpath, char, type_name))
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ check_path(path, is_dir=path.endswith(os.path.sep))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json
new file mode 100644
index 00000000..ccee80a2
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.json
@@ -0,0 +1,10 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "prefixes": [
+ "lib/ansible/",
+ "plugins/"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py
new file mode 100755
index 00000000..74a36ecc
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-main-display.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+MAIN_DISPLAY_IMPORT = 'from __main__ import display'
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as f:
+ for i, line in enumerate(f.readlines()):
+ if MAIN_DISPLAY_IMPORT in line:
+ lineno = i + 1
+ colno = line.index(MAIN_DISPLAY_IMPORT) + 1
+ print('%s:%d:%d: Display is a singleton, just import and instantiate' % (path, lineno, colno))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json
new file mode 100644
index 00000000..54d9fff5
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.json
@@ -0,0 +1,5 @@
+{
+ "text": true,
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py
new file mode 100755
index 00000000..e44005a5
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-smart-quotes.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ try:
+ text = text.decode('utf-8')
+ except UnicodeDecodeError as ex:
+ print('%s:%d:%d: UnicodeDecodeError: %s' % (path, line + 1, ex.start + 1, ex))
+ continue
+
+ match = re.search(u'([‘’“”])', text)
+
+ if match:
+ print('%s:%d:%d: use ASCII quotes `\'` and `"` instead of Unicode quotes' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py
new file mode 100755
index 00000000..e2201ab1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/no-unicode-literals.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(unicode_literals)', text)
+
+ if match:
+ print('%s:%d:%d: do not use `unicode_literals`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json
new file mode 100644
index 00000000..88858aeb
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.json
@@ -0,0 +1,7 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "ignore_self": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py
new file mode 100755
index 00000000..b2de1ba8
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/replace-urlopen.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
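+                # `[^#]*?` only searches text before any '#', so a commented-out urlopen reference is not flagged (illustrative)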
+ match = re.search(r'^(?:[^#]*?)(urlopen)', text)
+
+ if match:
+ print('%s:%d:%d: use `ansible.module_utils.urls.open_url` instead of `urlopen`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json
new file mode 100644
index 00000000..44003ec0
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.json
@@ -0,0 +1,11 @@
+{
+ "prefixes": [
+ "lib/ansible/config/ansible_builtin_runtime.yml",
+ "meta/routing.yml",
+ "meta/runtime.yml"
+ ],
+ "extensions": [
+ ".yml"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py
new file mode 100755
index 00000000..b986db2b
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/runtime-metadata.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+"""Schema validation of ansible-base's ansible_builtin_runtime.yml and collection's meta/runtime.yml"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import os
+import re
+import sys
+import yaml
+
+from voluptuous import Any, MultipleInvalid, PREVENT_EXTRA
+from voluptuous import Required, Schema, Invalid
+from voluptuous.humanize import humanize_error
+
+from ansible.module_utils.six import string_types
+
+
+def isodate(value):
+ """Validate a datetime.date or ISO 8601 date string."""
+ # datetime.date objects come from YAML dates, these are ok
+ if isinstance(value, datetime.date):
+ return value
+ # make sure we have a string
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date'
+ if not isinstance(value, string_types):
+ raise Invalid(msg)
+ try:
+ datetime.datetime.strptime(value, '%Y-%m-%d').date()
+ except ValueError:
+ raise Invalid(msg)
+ return value
+
+
+def validate_metadata_file(path):
+ """Validate explicit runtime metadata file"""
+ try:
+ with open(path, 'r') as f_path:
+ routing = yaml.safe_load(f_path)
+ except yaml.error.MarkedYAMLError as ex:
+ print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line +
+ 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
+ return
+ except Exception as ex: # pylint: disable=broad-except
+ print('%s:%d:%d: YAML load failed: %s' %
+ (path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
+ return
+
+ # Updates to schema MUST also be reflected in the documentation
+    # https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html
+
+ # plugin_routing schema
+
+ deprecation_tombstoning_schema = Any(Schema(
+ {
+ Required('removal_date'): Any(isodate),
+ 'warning_text': Any(*string_types),
+ },
+ extra=PREVENT_EXTRA
+ ), Schema(
+ {
+ Required('removal_version'): Any(*string_types),
+ 'warning_text': Any(*string_types),
+ },
+ extra=PREVENT_EXTRA
+ ))
+
+ plugin_routing_schema = Any(
+ Schema({
+ ('deprecation'): Any(deprecation_tombstoning_schema),
+ ('tombstone'): Any(deprecation_tombstoning_schema),
+ ('redirect'): Any(*string_types),
+ }, extra=PREVENT_EXTRA),
+ )
+
+ list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema}
+ for str_type in string_types]
+
+ plugin_schema = Schema({
+ ('action'): Any(None, *list_dict_plugin_routing_schema),
+ ('become'): Any(None, *list_dict_plugin_routing_schema),
+ ('cache'): Any(None, *list_dict_plugin_routing_schema),
+ ('callback'): Any(None, *list_dict_plugin_routing_schema),
+ ('cliconf'): Any(None, *list_dict_plugin_routing_schema),
+ ('connection'): Any(None, *list_dict_plugin_routing_schema),
+ ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema),
+ ('filter'): Any(None, *list_dict_plugin_routing_schema),
+ ('httpapi'): Any(None, *list_dict_plugin_routing_schema),
+ ('inventory'): Any(None, *list_dict_plugin_routing_schema),
+ ('lookup'): Any(None, *list_dict_plugin_routing_schema),
+ ('module_utils'): Any(None, *list_dict_plugin_routing_schema),
+ ('modules'): Any(None, *list_dict_plugin_routing_schema),
+ ('netconf'): Any(None, *list_dict_plugin_routing_schema),
+ ('shell'): Any(None, *list_dict_plugin_routing_schema),
+ ('strategy'): Any(None, *list_dict_plugin_routing_schema),
+ ('terminal'): Any(None, *list_dict_plugin_routing_schema),
+ ('test'): Any(None, *list_dict_plugin_routing_schema),
+ ('vars'): Any(None, *list_dict_plugin_routing_schema),
+ }, extra=PREVENT_EXTRA)
+
+ # import_redirection schema
+
+ import_redirection_schema = Any(
+ Schema({
+ ('redirect'): Any(*string_types),
+ # import_redirect doesn't currently support deprecation
+ }, extra=PREVENT_EXTRA)
+ )
+
+ list_dict_import_redirection_schema = [{str_type: import_redirection_schema}
+ for str_type in string_types]
+
+ # top level schema
+
+ schema = Schema({
+ # All of these are optional
+ ('plugin_routing'): Any(plugin_schema),
+ ('import_redirection'): Any(None, *list_dict_import_redirection_schema),
+ # requires_ansible: In the future we should validate this with SpecifierSet
+ ('requires_ansible'): Any(*string_types),
+ ('action_groups'): dict,
+ }, extra=PREVENT_EXTRA)
+
+ # Ensure schema is valid
+
+ try:
+ schema(routing)
+ except MultipleInvalid as ex:
+ for error in ex.errors:
+ # No way to get line/column numbers
+ print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error)))
+
+
+def main():
+ """Validate runtime metadata"""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ collection_legacy_file = 'meta/routing.yml'
+ collection_runtime_file = 'meta/runtime.yml'
+
+ for path in paths:
+ if path == collection_legacy_file:
+ print('%s:%d:%d: %s' % (path, 0, 0, ("Should be called '%s'" % collection_runtime_file)))
+ continue
+
+ validate_metadata_file(path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/shebang.json b/test/lib/ansible_test/_data/sanity/code-smell/shebang.json
new file mode 100644
index 00000000..5648429e
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/shebang.json
@@ -0,0 +1,4 @@
+{
+ "text": true,
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/shebang.py b/test/lib/ansible_test/_data/sanity/code-smell/shebang.py
new file mode 100755
index 00000000..7cf3cf72
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/shebang.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import stat
+import sys
+
+
+def main():
+ standard_shebangs = set([
+ b'#!/bin/bash -eu',
+ b'#!/bin/bash -eux',
+ b'#!/bin/sh',
+ b'#!/usr/bin/env bash',
+ b'#!/usr/bin/env fish',
+ b'#!/usr/bin/env pwsh',
+ b'#!/usr/bin/env python',
+ b'#!/usr/bin/make -f',
+ ])
+
+ integration_shebangs = set([
+ b'#!/bin/sh',
+ b'#!/usr/bin/env bash',
+ b'#!/usr/bin/env python',
+ ])
+
+ module_shebangs = {
+ '': b'#!/usr/bin/python',
+ '.py': b'#!/usr/bin/python',
+ '.ps1': b'#!powershell',
+ }
+
+ # see https://unicode.org/faq/utf_bom.html#bom1
+ byte_order_marks = (
+ (b'\x00\x00\xFE\xFF', 'UTF-32 (BE)'),
+ (b'\xFF\xFE\x00\x00', 'UTF-32 (LE)'),
+ (b'\xFE\xFF', 'UTF-16 (BE)'),
+ (b'\xFF\xFE', 'UTF-16 (LE)'),
+ (b'\xEF\xBB\xBF', 'UTF-8'),
+ )
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as path_fd:
+ shebang = path_fd.readline().strip()
+ mode = os.stat(path).st_mode
+ executable = (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & mode
+
+ if not shebang or not shebang.startswith(b'#!'):
+ if executable:
+ print('%s:%d:%d: file without shebang should not be executable' % (path, 0, 0))
+
+ for mark, name in byte_order_marks:
+ if shebang.startswith(mark):
+ print('%s:%d:%d: file starts with a %s byte order mark' % (path, 0, 0, name))
+ break
+
+ continue
+
+ is_module = False
+ is_integration = False
+
+ dirname = os.path.dirname(path)
+
+ if path.startswith('lib/ansible/modules/'):
+ is_module = True
+ elif re.search('^test/support/[^/]+/plugins/modules/', path):
+ is_module = True
+ elif re.search('^test/support/[^/]+/collections/ansible_collections/[^/]+/[^/]+/plugins/modules/', path):
+ is_module = True
+ elif path.startswith('test/lib/ansible_test/_data/'):
+ pass
+ elif path.startswith('lib/') or path.startswith('test/lib/'):
+ if executable:
+ print('%s:%d:%d: should not be executable' % (path, 0, 0))
+
+ if shebang:
+ print('%s:%d:%d: should not have a shebang' % (path, 0, 0))
+
+ continue
+ elif path.startswith('test/integration/targets/') or path.startswith('tests/integration/targets/'):
+ is_integration = True
+
+ if dirname.endswith('/library') or '/plugins/modules' in dirname or dirname in (
+ # non-standard module library directories
+ 'test/integration/targets/module_precedence/lib_no_extension',
+ 'test/integration/targets/module_precedence/lib_with_extension',
+ ):
+ is_module = True
+ elif path.startswith('plugins/modules/'):
+ is_module = True
+
+ if is_module:
+ if executable:
+ print('%s:%d:%d: module should not be executable' % (path, 0, 0))
+
+ ext = os.path.splitext(path)[1]
+ expected_shebang = module_shebangs.get(ext)
+ expected_ext = ' or '.join(['"%s"' % k for k in module_shebangs])
+
+ if expected_shebang:
+ if shebang == expected_shebang:
+ continue
+
+ print('%s:%d:%d: expected module shebang "%s" but found: %s' % (path, 1, 1, expected_shebang, shebang))
+ else:
+ print('%s:%d:%d: expected module extension %s but found: %s' % (path, 0, 0, expected_ext, ext))
+ else:
+ if is_integration:
+ allowed = integration_shebangs
+ else:
+ allowed = standard_shebangs
+
+ if shebang not in allowed:
+ print('%s:%d:%d: unexpected non-module shebang: %s' % (path, 1, 1, shebang))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/symlinks.json b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.json
new file mode 100644
index 00000000..6f13c86b
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.json
@@ -0,0 +1,5 @@
+{
+ "include_directories": true,
+ "include_symlinks": true,
+ "output": "path-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/symlinks.py b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.py
new file mode 100755
index 00000000..0585c6b1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/symlinks.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ root_dir = os.getcwd() + os.path.sep
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ if not os.path.islink(path.rstrip(os.path.sep)):
+ continue
+
+ if not os.path.exists(path):
+ print('%s: broken symlinks are not allowed' % path)
+ continue
+
+ if path.endswith(os.path.sep):
+ print('%s: symlinks to directories are not allowed' % path)
+ continue
+
+ real_path = os.path.realpath(path)
+
+ if not real_path.startswith(root_dir):
+ print('%s: symlinks outside content tree are not allowed: %s' % (path, os.path.relpath(real_path, os.path.dirname(path))))
+ continue
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json
new file mode 100644
index 00000000..36103051
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.json
@@ -0,0 +1,10 @@
+{
+ "prefixes": [
+ "lib/ansible/modules/",
+ "plugins/modules/"
+ ],
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py
new file mode 100755
index 00000000..687136dc
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/use-argspec-type-path.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'(expanduser)', text)
+
+ if match:
+ print('%s:%d:%d: use argspec type="path" instead of type="str" to avoid use of `expanduser`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json
new file mode 100644
index 00000000..776590b7
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.json
@@ -0,0 +1,6 @@
+{
+ "extensions": [
+ ".py"
+ ],
+ "output": "path-line-column-message"
+}
diff --git a/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py
new file mode 100755
index 00000000..49cb76c5
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/code-smell/use-compat-six.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import sys
+
+
+def main():
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'r') as path_fd:
+ for line, text in enumerate(path_fd.readlines()):
+ match = re.search(r'((^\s*import\s+six\b)|(^\s*from\s+six\b))', text)
+
+ if match:
+ print('%s:%d:%d: use `ansible.module_utils.six` instead of `six`' % (
+ path, line + 1, match.start(1) + 1))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/compile/compile.py b/test/lib/ansible_test/_data/sanity/compile/compile.py
new file mode 100755
index 00000000..61910eee
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/compile/compile.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""Python syntax checker with lint friendly output."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import warnings
+
+with warnings.catch_warnings():
+ # The parser module is deprecated as of Python 3.9.
+ # This implementation will need to be updated to use another solution.
+ # Until then, disable the deprecation warnings to prevent test failures.
+ warnings.simplefilter('ignore', DeprecationWarning)
+ import parser
+
+import sys
+
+
+def main():
+ status = 0
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ with open(path, 'rb') as source_fd:
+ if sys.version_info[0] == 3:
+ source = source_fd.read().decode('utf-8')
+ else:
+ source = source_fd.read()
+
+ try:
+ parser.suite(source)
+ except SyntaxError:
+ ex = sys.exc_info()[1]
+ status = 1
+ message = ex.text.splitlines()[0].strip()
+ sys.stdout.write("%s:%d:%d: SyntaxError: %s\n" % (path, ex.lineno, ex.offset, message))
+ sys.stdout.flush()
+
+ sys.exit(status)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/import/importer.py b/test/lib/ansible_test/_data/sanity/import/importer.py
new file mode 100755
index 00000000..ef8db71b
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/import/importer.py
@@ -0,0 +1,467 @@
+#!/usr/bin/env python
+"""Import the given python module(s) and report error(s) encountered."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def main():
+ """
+ Main program function used to isolate globals from imported code.
+ Changes to globals in imported modules on Python 2.x will overwrite our own globals.
+ """
+ import ansible
+ import contextlib
+ import datetime
+ import json
+ import os
+ import re
+ import runpy
+ import subprocess
+ import sys
+ import traceback
+ import types
+ import warnings
+
+ ansible_path = os.path.dirname(os.path.dirname(ansible.__file__))
+ temp_path = os.environ['SANITY_TEMP_PATH'] + os.path.sep
+ external_python = os.environ.get('SANITY_EXTERNAL_PYTHON') or sys.executable
+ collection_full_name = os.environ.get('SANITY_COLLECTION_FULL_NAME')
+ collection_root = os.environ.get('ANSIBLE_COLLECTIONS_PATH')
+
+ try:
+ # noinspection PyCompatibility
+ from importlib import import_module
+ except ImportError:
+ def import_module(name):
+ __import__(name)
+ return sys.modules[name]
+
+ try:
+ # noinspection PyCompatibility
+ from StringIO import StringIO
+ except ImportError:
+ from io import StringIO
+
+ if collection_full_name:
+ # allow importing code from collections when testing a collection
+ from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native, text_type
+ from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
+ from ansible.utils.collection_loader import _collection_finder
+
+ yaml_to_json_path = os.path.join(os.path.dirname(__file__), 'yaml_to_json.py')
+ yaml_to_dict_cache = {}
+
+ # unique ISO date marker matching the one present in yaml_to_json.py
+ iso_date_marker = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:'
+ iso_date_re = re.compile('^%s([0-9]{4})-([0-9]{2})-([0-9]{2})$' % iso_date_marker)
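+    # illustrative round trip: yaml_to_json.py emits the marker followed by '2020-01-01', which parse_value below turns back into datetime.date(2020, 1, 1)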
+
+ def parse_value(value):
+ """Custom value parser for JSON deserialization that recognizes our internal ISO date format."""
+ if isinstance(value, text_type):
+ match = iso_date_re.search(value)
+
+ if match:
+ value = datetime.date(int(match.group(1)), int(match.group(2)), int(match.group(3)))
+
+ return value
+
+ def object_hook(data):
+ """Object hook for custom ISO date deserialization from JSON."""
+ return dict((key, parse_value(value)) for key, value in data.items())
+
+ def yaml_to_dict(yaml, content_id):
+ """
+ Return a Python dict version of the provided YAML.
+ Conversion is done in a subprocess since the current Python interpreter does not have access to PyYAML.
+ """
+ if content_id in yaml_to_dict_cache:
+ return yaml_to_dict_cache[content_id]
+
+ try:
+ cmd = [external_python, yaml_to_json_path]
+ proc = subprocess.Popen([to_bytes(c) for c in cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout_bytes, stderr_bytes = proc.communicate(to_bytes(yaml))
+
+ if proc.returncode != 0:
+ raise Exception('command %s failed with return code %d: %s' % ([to_native(c) for c in cmd], proc.returncode, to_native(stderr_bytes)))
+
+ data = yaml_to_dict_cache[content_id] = json.loads(to_text(stdout_bytes), object_hook=object_hook)
+
+ return data
+ except Exception as ex:
+ raise Exception('internal importer error - failed to parse yaml: %s' % to_native(ex))
+
+ _collection_finder._meta_yml_to_dict = yaml_to_dict # pylint: disable=protected-access
+
+ collection_loader = _AnsibleCollectionFinder(paths=[collection_root])
+ collection_loader._install() # pylint: disable=protected-access
+ else:
+ # do not support collection loading when not testing a collection
+ collection_loader = None
+
+ # remove all modules under the ansible package
+ list(map(sys.modules.pop, [m for m in sys.modules if m.partition('.')[0] == ansible.__name__]))
+
+ # pre-load an empty ansible package to prevent unwanted code in __init__.py from loading
+ # this more accurately reflects the environment that AnsiballZ runs modules under
+ # it also avoids issues with imports in the ansible package that are not allowed
+ ansible_module = types.ModuleType(ansible.__name__)
+ ansible_module.__file__ = ansible.__file__
+ ansible_module.__path__ = ansible.__path__
+ ansible_module.__package__ = ansible.__package__
+
+ sys.modules[ansible.__name__] = ansible_module
+
+ class ImporterAnsibleModuleException(Exception):
+ """Exception thrown during initialization of ImporterAnsibleModule."""
+
+ class ImporterAnsibleModule:
+ """Replacement for AnsibleModule to support import testing."""
+ def __init__(self, *args, **kwargs):
+ raise ImporterAnsibleModuleException()
+
+ class ImportBlacklist:
+ """Blacklist inappropriate imports."""
+ def __init__(self, path, name):
+ self.path = path
+ self.name = name
+ self.loaded_modules = set()
+
+ def find_module(self, fullname, path=None):
+ """Return self if the given fullname is blacklisted, otherwise return None.
+ :param fullname: str
+ :param path: str
+ :return: ImportBlacklist | None
+ """
+ if fullname in self.loaded_modules:
+ return None # ignore modules that are already being loaded
+
+ if is_name_in_namepace(fullname, ['ansible']):
+ if fullname in ('ansible.module_utils.basic', 'ansible.module_utils.common.removed'):
+ return self # intercept loading so we can modify the result
+
+ if is_name_in_namepace(fullname, ['ansible.module_utils', self.name]):
+ return None # module_utils and module under test are always allowed
+
+ if any(os.path.exists(candidate_path) for candidate_path in convert_ansible_name_to_absolute_paths(fullname)):
+ return self # blacklist ansible files that exist
+
+ return None # ansible file does not exist, do not blacklist
+
+ if is_name_in_namepace(fullname, ['ansible_collections']):
+ if not collection_loader:
+ return self # blacklist collections when we are not testing a collection
+
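+                # the empty segments in 'ansible_collections...plugins.module_utils' act as wildcards for the collection
+                # namespace and name (see is_name_in_namepace below), matching e.g. ansible_collections.my_ns.my_col.plugins.module_utils (illustrative)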
+ if is_name_in_namepace(fullname, ['ansible_collections...plugins.module_utils', self.name]):
+ return None # module_utils and module under test are always allowed
+
+ if collection_loader.find_module(fullname, path):
+ return self # blacklist collection files that exist
+
+ return None # collection file does not exist, do not blacklist
+
+ # not a namespace we care about
+ return None
+
+ def load_module(self, fullname):
+ """Raise an ImportError.
+ :type fullname: str
+ """
+ if fullname == 'ansible.module_utils.basic':
+ module = self.__load_module(fullname)
+
+ # stop Ansible module execution during AnsibleModule instantiation
+ module.AnsibleModule = ImporterAnsibleModule
+ # no-op for _load_params since it may be called before instantiating AnsibleModule
+ module._load_params = lambda *args, **kwargs: {} # pylint: disable=protected-access
+
+ return module
+
+ if fullname == 'ansible.module_utils.common.removed':
+ module = self.__load_module(fullname)
+
+ # no-op for removed_module since it is called in place of AnsibleModule instantiation
+ module.removed_module = lambda *args, **kwargs: None
+
+ return module
+
+ raise ImportError('import of "%s" is not allowed in this context' % fullname)
+
+ def __load_module(self, fullname):
+ """Load the requested module while avoiding infinite recursion.
+ :type fullname: str
+ :rtype: module
+ """
+ self.loaded_modules.add(fullname)
+ return import_module(fullname)
+
+ def run():
+ """Main program function."""
+ base_dir = os.getcwd()
+ messages = set()
+
+ for path in sys.argv[1:] or sys.stdin.read().splitlines():
+ name = convert_relative_path_to_name(path)
+ test_python_module(path, name, base_dir, messages)
+
+ if messages:
+ sys.exit(10)
+
+ def test_python_module(path, name, base_dir, messages):
+ """Test the given python module by importing it.
+ :type path: str
+ :type name: str
+ :type base_dir: str
+ :type messages: set[str]
+ """
+ if name in sys.modules:
+ return # cannot be tested because it has already been loaded
+
+ is_ansible_module = (path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/')) and os.path.basename(path) != '__init__.py'
+ run_main = is_ansible_module
+
+ if path == 'lib/ansible/modules/async_wrapper.py':
+ # async_wrapper is a non-standard Ansible module (does not use AnsibleModule) so we cannot test the main function
+ run_main = False
+
+ capture_normal = Capture()
+ capture_main = Capture()
+
+ try:
+ with monitor_sys_modules(path, messages):
+ with blacklist_imports(path, name, messages):
+ with capture_output(capture_normal):
+ import_module(name)
+
+ if run_main:
+ with monitor_sys_modules(path, messages):
+ with blacklist_imports(path, name, messages):
+ with capture_output(capture_main):
+ runpy.run_module(name, run_name='__main__', alter_sys=True)
+ except ImporterAnsibleModuleException:
+ # module instantiated AnsibleModule without raising an exception
+ pass
+ except BaseException as ex: # pylint: disable=locally-disabled, broad-except
+ # intentionally catch all exceptions, including calls to sys.exit
+ exc_type, _exc, exc_tb = sys.exc_info()
+ message = str(ex)
+ results = list(reversed(traceback.extract_tb(exc_tb)))
+ line = 0
+ offset = 0
+ full_path = os.path.join(base_dir, path)
+ base_path = base_dir + os.path.sep
+ source = None
+
+ # avoid line wraps in messages
+ message = re.sub(r'\n *', ': ', message)
+
+ for result in results:
+ if result[0] == full_path:
+ # save the line number for the file under test
+ line = result[1] or 0
+
+ if not source and result[0].startswith(base_path) and not result[0].startswith(temp_path):
+ # save the first path and line number in the traceback which is in our source tree
+ source = (os.path.relpath(result[0], base_path), result[1] or 0, 0)
+
+ if isinstance(ex, SyntaxError):
+ # SyntaxError has better information than the traceback
+ if ex.filename == full_path: # pylint: disable=locally-disabled, no-member
+ # syntax error was reported in the file under test
+ line = ex.lineno or 0 # pylint: disable=locally-disabled, no-member
+ offset = ex.offset or 0 # pylint: disable=locally-disabled, no-member
+ elif ex.filename.startswith(base_path) and not ex.filename.startswith(temp_path): # pylint: disable=locally-disabled, no-member
+ # syntax error was reported in our source tree
+ source = (os.path.relpath(ex.filename, base_path), ex.lineno or 0, ex.offset or 0) # pylint: disable=locally-disabled, no-member
+
+ # remove the filename and line number from the message
+ # either it was extracted above, or it's not really useful information
+ message = re.sub(r' \(.*?, line [0-9]+\)$', '', message)
+
+ if source and source[0] != path:
+ message += ' (at %s:%d:%d)' % (source[0], source[1], source[2])
+
+ report_message(path, line, offset, 'traceback', '%s: %s' % (exc_type.__name__, message), messages)
+ finally:
+ capture_report(path, capture_normal, messages)
+ capture_report(path, capture_main, messages)
+
+ def is_name_in_namepace(name, namespaces):
+ """Returns True if the given name is one of the given namespaces, otherwise returns False."""
+ name_parts = name.split('.')
+
+ for namespace in namespaces:
+ namespace_parts = namespace.split('.')
+ length = min(len(name_parts), len(namespace_parts))
+
+ truncated_name = name_parts[0:length]
+ truncated_namespace = namespace_parts[0:length]
+
+ # empty parts in the namespace are treated as wildcards
+            # to simplify the comparison, blank out the corresponding positions in the name as well
+ for idx, part in enumerate(truncated_namespace):
+ if not part:
+ truncated_name[idx] = part
+
+ # example: name=ansible, allowed_name=ansible.module_utils
+ # example: name=ansible.module_utils.system.ping, allowed_name=ansible.module_utils
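+            # example: name=ansible_collections.my_ns.my_col, allowed_name=ansible_collections.. (trailing empty parts act as wildcards)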
+ if truncated_name == truncated_namespace:
+ return True
+
+ return False
+
+ def check_sys_modules(path, before, messages):
+ """Check for unwanted changes to sys.modules.
+ :type path: str
+ :type before: dict[str, module]
+ :type messages: set[str]
+ """
+ after = sys.modules
+ removed = set(before.keys()) - set(after.keys())
+ changed = set(key for key, value in before.items() if key in after and value != after[key])
+
+ # additions are checked by our custom PEP 302 loader, so we don't need to check them again here
+
+ for module in sorted(removed):
+ report_message(path, 0, 0, 'unload', 'unloading of "%s" in sys.modules is not supported' % module, messages)
+
+ for module in sorted(changed):
+ report_message(path, 0, 0, 'reload', 'reloading of "%s" in sys.modules is not supported' % module, messages)
+
+ def convert_ansible_name_to_absolute_paths(name):
+ """Calculate the module path from the given name.
+ :type name: str
+ :rtype: list[str]
+ """
+ return [
+ os.path.join(ansible_path, name.replace('.', os.path.sep)),
+ os.path.join(ansible_path, name.replace('.', os.path.sep)) + '.py',
+ ]
+
+ def convert_relative_path_to_name(path):
+ """Calculate the module name from the given path.
+ :type path: str
+ :rtype: str
+ """
+ if path.endswith('/__init__.py'):
+ clean_path = os.path.dirname(path)
+ else:
+ clean_path = path
+
+ clean_path = os.path.splitext(clean_path)[0]
+
+ name = clean_path.replace(os.path.sep, '.')
+
+ if collection_loader:
+ # when testing collections the relative paths (and names) being tested are within the collection under test
+ name = 'ansible_collections.%s.%s' % (collection_full_name, name)
+ else:
+ # when testing ansible all files being imported reside under the lib directory
+ name = name[len('lib/'):]
+
+ return name
+
+ class Capture:
+ """Captured output and/or exception."""
+ def __init__(self):
+ self.stdout = StringIO()
+ self.stderr = StringIO()
+
+ def capture_report(path, capture, messages):
+ """Report on captured output.
+ :type path: str
+ :type capture: Capture
+ :type messages: set[str]
+ """
+ if capture.stdout.getvalue():
+ first = capture.stdout.getvalue().strip().splitlines()[0].strip()
+ report_message(path, 0, 0, 'stdout', first, messages)
+
+ if capture.stderr.getvalue():
+ first = capture.stderr.getvalue().strip().splitlines()[0].strip()
+ report_message(path, 0, 0, 'stderr', first, messages)
+
+ def report_message(path, line, column, code, message, messages):
+ """Report message if not already reported.
+ :type path: str
+ :type line: int
+ :type column: int
+ :type code: str
+ :type message: str
+ :type messages: set[str]
+ """
+ message = '%s:%d:%d: %s: %s' % (path, line, column, code, message)
+
+ if message not in messages:
+ messages.add(message)
+ print(message)
+
+ @contextlib.contextmanager
+ def blacklist_imports(path, name, messages):
+ """Blacklist imports.
+ :type path: str
+ :type name: str
+ :type messages: set[str]
+ """
+ blacklist = ImportBlacklist(path, name)
+
+ sys.meta_path.insert(0, blacklist)
+ sys.path_importer_cache.clear()
+
+ try:
+ yield
+ finally:
+ if sys.meta_path[0] != blacklist:
+ report_message(path, 0, 0, 'metapath', 'changes to sys.meta_path[0] are not permitted', messages)
+
+ while blacklist in sys.meta_path:
+ sys.meta_path.remove(blacklist)
+
+ sys.path_importer_cache.clear()
+
+ @contextlib.contextmanager
+ def monitor_sys_modules(path, messages):
+ """Monitor sys.modules for unwanted changes, reverting any additions made to our own namespaces."""
+ snapshot = sys.modules.copy()
+
+ try:
+ yield
+ finally:
+ check_sys_modules(path, snapshot, messages)
+
+ for key in set(sys.modules.keys()) - set(snapshot.keys()):
+ if is_name_in_namepace(key, ('ansible', 'ansible_collections')):
+ del sys.modules[key] # only unload our own code since we know it's native Python
+
+ @contextlib.contextmanager
+ def capture_output(capture):
+ """Capture sys.stdout and sys.stderr.
+ :type capture: Capture
+ """
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+
+ sys.stdout = capture.stdout
+ sys.stderr = capture.stderr
+
+ # clear all warnings registries to make all warnings available
+ for module in sys.modules.values():
+ try:
+ module.__warningregistry__.clear()
+ except AttributeError:
+ pass
+
+ with warnings.catch_warnings():
+ warnings.simplefilter('error')
+
+ try:
+ yield
+ finally:
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+
+ run()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/import/yaml_to_json.py b/test/lib/ansible_test/_data/sanity/import/yaml_to_json.py
new file mode 100644
index 00000000..09be9576
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/import/yaml_to_json.py
@@ -0,0 +1,27 @@
+"""Read YAML from stdin and write JSON to stdout."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import json
+import sys
+
+from yaml import load
+
+try:
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeLoader
+
+# unique ISO date marker matching the one present in importer.py
+ISO_DATE_MARKER = 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:'
+
+
+def default(value):
+ if isinstance(value, datetime.date):
+ return '%s%s' % (ISO_DATE_MARKER, value.isoformat())
+
+ raise TypeError('cannot serialize type: %s' % type(value))
+
+
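+# example: datetime.date(2020, 1, 1) is serialized as
+# 'isodate:f23983df-f3df-453c-9904-bcd08af468cc:2020-01-01'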
+json.dump(load(sys.stdin, Loader=SafeLoader), sys.stdout, default=default)
diff --git a/test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py b/test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py
new file mode 100644
index 00000000..74a45f00
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/integration-aliases/yaml_to_json.py
@@ -0,0 +1,15 @@
+"""Read YAML from stdin and write JSON to stdout."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import sys
+
+from yaml import load
+
+try:
+ from yaml import CSafeLoader as SafeLoader
+except ImportError:
+ from yaml import SafeLoader
+
+json.dump(load(sys.stdin, Loader=SafeLoader), sys.stdout)
diff --git a/test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt b/test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt
new file mode 100644
index 00000000..659c7f59
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pep8/current-ignore.txt
@@ -0,0 +1,4 @@
+E402
+W503
+W504
+E741
diff --git a/test/lib/ansible_test/_data/sanity/pslint/pslint.ps1 b/test/lib/ansible_test/_data/sanity/pslint/pslint.ps1
new file mode 100755
index 00000000..1ef2743a
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pslint/pslint.ps1
@@ -0,0 +1,43 @@
+#!/usr/bin/env pwsh
+#Requires -Version 6
+#Requires -Modules PSScriptAnalyzer, PSSA-PSCustomUseLiteralPath
+
+Set-StrictMode -Version 2.0
+$ErrorActionPreference = "Stop"
+$WarningPreference = "Stop"
+
+# Until https://github.com/PowerShell/PSScriptAnalyzer/issues/1217 is fixed we need to import Pester if it's
+# available.
+if (Get-Module -Name Pester -ListAvailable -ErrorAction SilentlyContinue) {
+ Import-Module -Name Pester
+}
+
+$LiteralPathRule = Import-Module -Name PSSA-PSCustomUseLiteralPath -PassThru
+$LiteralPathRulePath = Join-Path -Path $LiteralPathRule.ModuleBase -ChildPath $LiteralPathRule.RootModule
+
+$PSSAParams = @{
+ CustomRulePath = @($LiteralPathRulePath)
+ IncludeDefaultRules = $true
+ Setting = (Join-Path -Path $PSScriptRoot -ChildPath "settings.psd1")
+}
+
+$Results = @()
+
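+# Invoke-ScriptAnalyzer can fail intermittently, so retry each path up to 3 times before rethrowing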
+ForEach ($Path in $Args) {
+ $Retries = 3
+
+ Do {
+ Try {
+ $Results += Invoke-ScriptAnalyzer -Path $Path @PSSAParams 3> $null
+ $Retries = 0
+ }
+ Catch {
+ If (--$Retries -le 0) {
+ Throw
+ }
+ }
+ }
+ Until ($Retries -le 0)
+}
+
+ConvertTo-Json -InputObject $Results
diff --git a/test/lib/ansible_test/_data/sanity/pslint/settings.psd1 b/test/lib/ansible_test/_data/sanity/pslint/settings.psd1
new file mode 100644
index 00000000..7646ec35
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pslint/settings.psd1
@@ -0,0 +1,13 @@
+@{
+ ExcludeRules=@(
+ 'PSUseOutputTypeCorrectly',
+ 'PSUseShouldProcessForStateChangingFunctions',
+        # We send strings as plaintext, so we will always trip the 3 rules below
+ 'PSAvoidUsingPlainTextForPassword',
+ 'PSAvoidUsingConvertToSecureStringWithPlainText',
+ 'PSAvoidUsingUserNameAndPassWordParams',
+ # We send the module as a base64 encoded string and a BOM will cause
+ # issues here
+ 'PSUseBOMForUnicodeEncodedFile'
+ )
+}
diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg
new file mode 100644
index 00000000..d3643162
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/config/ansible-test.cfg
@@ -0,0 +1,39 @@
+[MESSAGES CONTROL]
+
+disable=
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ too-few-public-methods,
+ too-many-arguments,
+ too-many-branches,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-return-statements,
+ too-many-statements,
+ no-self-use,
+ unused-import, # pylint does not understand PEP 484 type hints
+    consider-using-dict-comprehension, # dict comprehensions require Python 2.7+, but we still support Python 2.6
+    consider-using-set-comprehension, # set comprehensions require Python 2.7+, but we still support Python 2.6
+
+[BASIC]
+
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+good-names=i,
+ j,
+ k,
+ ex,
+ Run,
+ C,
+ __metaclass__,
+
+method-rgx=[a-z_][a-z0-9_]{2,40}$
+function-rgx=[a-z_][a-z0-9_]{2,40}$
diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg
new file mode 100644
index 00000000..c2d75b1c
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/config/collection.cfg
@@ -0,0 +1,135 @@
+[MESSAGES CONTROL]
+
+disable=
+ abstract-method,
+ access-member-before-definition,
+ arguments-differ,
+ assignment-from-no-return,
+ assignment-from-none,
+ attribute-defined-outside-init,
+ bad-continuation,
+ bad-indentation,
+ bad-mcs-classmethod-argument,
+ broad-except,
+ c-extension-no-member,
+ cell-var-from-loop,
+ chained-comparison,
+ comparison-with-callable,
+ consider-iterating-dictionary,
+ consider-merging-isinstance,
+ consider-using-dict-comprehension,
+ consider-using-enumerate,
+ consider-using-get,
+ consider-using-in,
+ consider-using-set-comprehension,
+ consider-using-ternary,
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ deprecated-lambda,
+ deprecated-method,
+ deprecated-module,
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ eval-used,
+ exec-used,
+ expression-not-assigned,
+ fixme,
+ function-redefined,
+ global-statement,
+ global-variable-undefined,
+ import-error,
+ import-self,
+ inconsistent-return-statements,
+ invalid-envvar-default,
+ invalid-name,
+ invalid-sequence-index,
+ keyword-arg-before-vararg,
+ len-as-condition,
+ line-too-long,
+ literal-comparison,
+ locally-disabled,
+ method-hidden,
+ misplaced-comparison-constant,
+ missing-docstring,
+ no-else-raise,
+ no-else-return,
+ no-init,
+ no-member,
+ no-name-in-module,
+ no-self-use,
+ no-value-for-parameter,
+ non-iterator-returned,
+ not-a-mapping,
+ not-an-iterable,
+ not-callable,
+ old-style-class,
+ pointless-statement,
+ pointless-string-statement,
+ possibly-unused-variable,
+ protected-access,
+ redefined-argument-from-local,
+ redefined-builtin,
+ redefined-outer-name,
+ redefined-variable-type,
+ reimported,
+ relative-beyond-top-level, # https://github.com/PyCQA/pylint/issues/2967
+ signature-differs,
+ simplifiable-if-expression,
+ simplifiable-if-statement,
+ subprocess-popen-preexec-fn,
+ super-init-not-called,
+ superfluous-parens,
+ too-few-public-methods,
+ too-many-ancestors,
+ too-many-arguments,
+ too-many-boolean-expressions,
+ too-many-branches,
+ too-many-function-args,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-public-methods,
+ too-many-return-statements,
+ too-many-statements,
+ trailing-comma-tuple,
+    trailing-comma-tuple,
+ unbalanced-tuple-unpacking,
+ undefined-loop-variable,
+ unexpected-keyword-arg,
+ ungrouped-imports,
+ unidiomatic-typecheck,
+ unnecessary-pass,
+ unsubscriptable-object,
+ unsupported-assignment-operation,
+ unsupported-delete-operation,
+ unsupported-membership-test,
+ unused-argument,
+ unused-import,
+ unused-variable,
+ used-before-assignment,
+ useless-object-inheritance,
+ useless-return,
+ useless-super-delegation,
+ wrong-import-order,
+ wrong-import-position,
+
+[BASIC]
+
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+good-names=i,
+ j,
+ k,
+ ex,
+ Run,
+
+[TYPECHECK]
+
+ignored-modules=
+ _MovedItems,
diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/default.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/default.cfg
new file mode 100644
index 00000000..45199078
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/config/default.cfg
@@ -0,0 +1,135 @@
+[MESSAGES CONTROL]
+
+disable=
+ abstract-method,
+ access-member-before-definition,
+ arguments-differ,
+ assignment-from-no-return,
+ assignment-from-none,
+ attribute-defined-outside-init,
+ bad-continuation,
+ bad-indentation,
+ bad-mcs-classmethod-argument,
+ broad-except,
+ c-extension-no-member,
+ cell-var-from-loop,
+ chained-comparison,
+ comparison-with-callable,
+ consider-iterating-dictionary,
+ consider-merging-isinstance,
+ consider-using-dict-comprehension,
+ consider-using-enumerate,
+ consider-using-get,
+ consider-using-in,
+ consider-using-set-comprehension,
+ consider-using-ternary,
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ deprecated-lambda,
+ deprecated-method,
+ deprecated-module,
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ eval-used,
+ exec-used,
+ expression-not-assigned,
+ fixme,
+ function-redefined,
+ global-statement,
+ global-variable-undefined,
+ import-error,
+ import-self,
+ inconsistent-return-statements,
+ invalid-envvar-default,
+ invalid-name,
+ invalid-sequence-index,
+ keyword-arg-before-vararg,
+ len-as-condition,
+ line-too-long,
+ literal-comparison,
+ locally-disabled,
+ method-hidden,
+ misplaced-comparison-constant,
+ missing-docstring,
+ no-else-raise,
+ no-else-return,
+ no-init,
+ no-member,
+ no-name-in-module,
+ no-self-use,
+ no-value-for-parameter,
+ non-iterator-returned,
+ not-a-mapping,
+ not-an-iterable,
+ not-callable,
+ old-style-class,
+ pointless-statement,
+ pointless-string-statement,
+ possibly-unused-variable,
+ protected-access,
+ redefined-argument-from-local,
+ redefined-builtin,
+ redefined-outer-name,
+ redefined-variable-type,
+ reimported,
+ relative-import,
+ signature-differs,
+ simplifiable-if-expression,
+ simplifiable-if-statement,
+ subprocess-popen-preexec-fn,
+ super-init-not-called,
+ superfluous-parens,
+ too-few-public-methods,
+ too-many-ancestors,
+ too-many-arguments,
+ too-many-boolean-expressions,
+ too-many-branches,
+ too-many-function-args,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-public-methods,
+ too-many-return-statements,
+ too-many-statements,
+    trailing-comma-tuple,
+ try-except-raise,
+ unbalanced-tuple-unpacking,
+ undefined-loop-variable,
+ unexpected-keyword-arg,
+ ungrouped-imports,
+ unidiomatic-typecheck,
+ unnecessary-pass,
+ unsubscriptable-object,
+ unsupported-assignment-operation,
+ unsupported-delete-operation,
+ unsupported-membership-test,
+ unused-argument,
+ unused-import,
+ unused-variable,
+ used-before-assignment,
+ useless-object-inheritance,
+ useless-return,
+ useless-super-delegation,
+ wrong-import-order,
+ wrong-import-position,
+
+[BASIC]
+
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+good-names=i,
+ j,
+ k,
+ ex,
+ Run,
+
+[TYPECHECK]
+
+ignored-modules=
+ _MovedItems,
diff --git a/test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg b/test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg
new file mode 100644
index 00000000..f601ab57
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/config/sanity.cfg
@@ -0,0 +1,42 @@
+[MESSAGES CONTROL]
+
+disable=
+ cyclic-import, # consistent results require running with --jobs 1 and testing all files
+ duplicate-code, # consistent results require running with --jobs 1 and testing all files
+ too-few-public-methods,
+ too-many-arguments,
+ too-many-branches,
+ too-many-instance-attributes,
+ too-many-lines,
+ too-many-locals,
+ too-many-nested-blocks,
+ too-many-return-statements,
+ too-many-statements,
+ missing-docstring,
+ unused-import, # pylint does not understand PEP 484 type hints
+    consider-using-dict-comprehension, # dict comprehensions require Python 2.7+, but we still support Python 2.6
+    consider-using-set-comprehension, # set comprehensions require Python 2.7+, but we still support Python 2.6
+
+[BASIC]
+
+bad-names=foo,
+ bar,
+ baz,
+ toto,
+ tutu,
+ tata,
+ _,
+
+good-names=i,
+ j,
+ k,
+ f,
+ e,
+ ex,
+ Run,
+ C,
+ __metaclass__,
+
+module-rgx=[a-z_][a-z0-9_-]{2,40}$
+method-rgx=[a-z_][a-z0-9_]{2,40}$
+function-rgx=[a-z_][a-z0-9_]{2,40}$
diff --git a/test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py b/test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py
new file mode 100644
index 00000000..c06059c4
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/plugins/deprecated.py
@@ -0,0 +1,250 @@
+# (c) 2018, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import re
+
+from distutils.version import LooseVersion
+
+import astroid
+
+from pylint.interfaces import IAstroidChecker
+from pylint.checkers import BaseChecker
+from pylint.checkers.utils import check_messages
+
+from ansible.module_utils.six import string_types
+from ansible.release import __version__ as ansible_version_raw
+from ansible.utils.version import SemanticVersion
+
+MSGS = {
+ 'E9501': ("Deprecated version (%r) found in call to Display.deprecated "
+ "or AnsibleModule.deprecate",
+ "ansible-deprecated-version",
+ "Used when a call to Display.deprecated specifies a version "
+ "less than or equal to the current version of Ansible",
+ {'minversion': (2, 6)}),
+ 'E9502': ("Display.deprecated call without a version or date",
+ "ansible-deprecated-no-version",
+ "Used when a call to Display.deprecated does not specify a "
+ "version or date",
+ {'minversion': (2, 6)}),
+ 'E9503': ("Invalid deprecated version (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "ansible-invalid-deprecated-version",
+ "Used when a call to Display.deprecated specifies an invalid "
+ "Ansible version number",
+ {'minversion': (2, 6)}),
+ 'E9504': ("Deprecated version (%r) found in call to Display.deprecated "
+ "or AnsibleModule.deprecate",
+ "collection-deprecated-version",
+ "Used when a call to Display.deprecated specifies a collection "
+ "version less than or equal to the current version of this "
+ "collection",
+ {'minversion': (2, 6)}),
+ 'E9505': ("Invalid deprecated version (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "collection-invalid-deprecated-version",
+ "Used when a call to Display.deprecated specifies an invalid "
+ "collection version number",
+ {'minversion': (2, 6)}),
+ 'E9506': ("No collection name found in call to Display.deprecated or "
+ "AnsibleModule.deprecate",
+ "ansible-deprecated-no-collection-name",
+ "The current collection name in format `namespace.name` must "
+ "be provided as collection_name when calling Display.deprecated "
+ "or AnsibleModule.deprecate (`ansible.builtin` for ansible-base)",
+ {'minversion': (2, 6)}),
+ 'E9507': ("Wrong collection name (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "wrong-collection-deprecated",
+ "The name of the current collection must be passed to the "
+ "Display.deprecated resp. AnsibleModule.deprecate calls "
+ "(`ansible.builtin` for ansible-base)",
+ {'minversion': (2, 6)}),
+ 'E9508': ("Expired date (%r) found in call to Display.deprecated "
+ "or AnsibleModule.deprecate",
+ "ansible-deprecated-date",
+ "Used when a call to Display.deprecated specifies a date "
+ "before today",
+ {'minversion': (2, 6)}),
+ 'E9509': ("Invalid deprecated date (%r) found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "ansible-invalid-deprecated-date",
+ "Used when a call to Display.deprecated specifies an invalid "
+ "date. It must be a string in format `YYYY-MM-DD` (ISO 8601)",
+ {'minversion': (2, 6)}),
+ 'E9510': ("Both version and date found in call to "
+ "Display.deprecated or AnsibleModule.deprecate",
+ "ansible-deprecated-both-version-and-date",
+ "Only one of version and date must be specified",
+ {'minversion': (2, 6)}),
+}
+
+
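+# example: an ansible_version_raw of '2.10.8.dev0' yields ANSIBLE_VERSION == LooseVersion('2.10.8')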
+ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version_raw.split('.')[:3]))
+
+
+def _get_expr_name(node):
+ """Funciton to get either ``attrname`` or ``name`` from ``node.func.expr``
+
+ Created specifically for the case of ``display.deprecated`` or ``self._display.deprecated``
+ """
+ try:
+ return node.func.expr.attrname
+ except AttributeError:
+ # If this fails too, we'll let it raise, the caller should catch it
+ return node.func.expr.name
+
+
+def parse_isodate(value):
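+    # example: parse_isodate('2020-12-31') -> datetime.date(2020, 12, 31)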
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
+ if not isinstance(value, string_types):
+ raise ValueError(msg)
+    # Python 3.7 and later provide datetime.date.fromisoformat(); for older
+    # versions we have to parse the date manually.
+ if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
+ raise ValueError(msg)
+ try:
+ return datetime.datetime.strptime(value, '%Y-%m-%d').date()
+ except ValueError:
+ raise ValueError(msg)
+
+
+class AnsibleDeprecatedChecker(BaseChecker):
+ """Checks for Display.deprecated calls to ensure that the ``version``
+ has not passed or met the time for removal
+ """
+
+ __implements__ = (IAstroidChecker,)
+ name = 'deprecated'
+ msgs = MSGS
+
+ options = (
+ ('collection-name', {
+ 'default': None,
+ 'type': 'string',
+ 'metavar': '<name>',
+ 'help': 'The collection\'s name used to check collection names in deprecations.',
+ }),
+ ('collection-version', {
+ 'default': None,
+ 'type': 'string',
+ 'metavar': '<version>',
+ 'help': 'The collection\'s version number used to check deprecations.',
+ }),
+ )
+
+ def __init__(self, *args, **kwargs):
+ self.collection_version = None
+ self.collection_name = None
+ super(AnsibleDeprecatedChecker, self).__init__(*args, **kwargs)
+
+ def set_option(self, optname, value, action=None, optdict=None):
+ super(AnsibleDeprecatedChecker, self).set_option(optname, value, action, optdict)
+ if optname == 'collection-version' and value is not None:
+ self.collection_version = SemanticVersion(self.config.collection_version)
+ if optname == 'collection-name' and value is not None:
+ self.collection_name = self.config.collection_name
+
+ def _check_date(self, node, date):
+ if not isinstance(date, str):
+            self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
+ return
+
+ try:
+ date_parsed = parse_isodate(date)
+ except ValueError:
+ self.add_message('ansible-invalid-deprecated-date', node=node, args=(date,))
+ return
+
+ if date_parsed < datetime.date.today():
+ self.add_message('ansible-deprecated-date', node=node, args=(date,))
+
+ def _check_version(self, node, version, collection_name):
+ if not isinstance(version, (str, float)):
+            self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,))
+ return
+
+ version_no = str(version)
+
+ if collection_name == 'ansible.builtin':
+ # Ansible-base
+ try:
+ if not version_no:
+ raise ValueError('Version string should not be empty')
+ loose_version = LooseVersion(str(version_no))
+ if ANSIBLE_VERSION >= loose_version:
+ self.add_message('ansible-deprecated-version', node=node, args=(version,))
+ except ValueError:
+ self.add_message('ansible-invalid-deprecated-version', node=node, args=(version,))
+ elif collection_name:
+ # Collections
+ try:
+ if not version_no:
+ raise ValueError('Version string should not be empty')
+ semantic_version = SemanticVersion(version_no)
+ if collection_name == self.collection_name and self.collection_version is not None:
+ if self.collection_version >= semantic_version:
+ self.add_message('collection-deprecated-version', node=node, args=(version,))
+ except ValueError:
+ self.add_message('collection-invalid-deprecated-version', node=node, args=(version,))
+
+ @check_messages(*(MSGS.keys()))
+ def visit_call(self, node):
+ version = None
+ date = None
+ collection_name = None
+ try:
+ if (node.func.attrname == 'deprecated' and 'display' in _get_expr_name(node) or
+ node.func.attrname == 'deprecate' and _get_expr_name(node)):
+ if node.keywords:
+ for keyword in node.keywords:
+ if len(node.keywords) == 1 and keyword.arg is None:
+ # This is likely a **kwargs splat
+ return
+ if keyword.arg == 'version':
+ if isinstance(keyword.value.value, astroid.Name):
+ # This is likely a variable
+ return
+ version = keyword.value.value
+ if keyword.arg == 'date':
+ if isinstance(keyword.value.value, astroid.Name):
+ # This is likely a variable
+ return
+ date = keyword.value.value
+ if keyword.arg == 'collection_name':
+ if isinstance(keyword.value.value, astroid.Name):
+ # This is likely a variable
+ return
+ collection_name = keyword.value.value
+ if not version and not date:
+ try:
+ version = node.args[1].value
+ except IndexError:
+ self.add_message('ansible-deprecated-no-version', node=node)
+ return
+ if version and date:
+ self.add_message('ansible-deprecated-both-version-and-date', node=node)
+
+ if collection_name:
+ this_collection = collection_name == (self.collection_name or 'ansible.builtin')
+ if not this_collection:
+ self.add_message('wrong-collection-deprecated', node=node, args=(collection_name,))
+ elif self.collection_name is not None:
+ self.add_message('ansible-deprecated-no-collection-name', node=node)
+
+ if date:
+ self._check_date(node, date)
+ elif version:
+ self._check_version(node, version, collection_name)
+ except AttributeError:
+ # Not the type of node we are interested in
+ pass
+
+
+def register(linter):
+ """required method to auto register this checker """
+ linter.register_checker(AnsibleDeprecatedChecker(linter))
diff --git a/test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py b/test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py
new file mode 100644
index 00000000..eafde73b
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/plugins/string_format.py
@@ -0,0 +1,90 @@
+# (c) 2018, Matt Martz <matt@sivel.net>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# -*- coding: utf-8 -*-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+import six
+
+import astroid
+from pylint.interfaces import IAstroidChecker
+from pylint.checkers import BaseChecker
+from pylint.checkers import utils
+from pylint.checkers.utils import check_messages
+try:
+ from pylint.checkers.utils import parse_format_method_string
+except ImportError:
+ # noinspection PyUnresolvedReferences
+ from pylint.checkers.strings import parse_format_method_string
+
+_PY3K = sys.version_info[:2] >= (3, 0)
+
+MSGS = {
+ 'E9305': ("Format string contains automatic field numbering "
+ "specification",
+ "ansible-format-automatic-specification",
+ "Used when a PEP 3101 format string contains automatic "
+ "field numbering (e.g. '{}').",
+ {'minversion': (2, 6)}),
+ 'E9390': ("bytes object has no .format attribute",
+ "ansible-no-format-on-bytestring",
+ "Used when a bytestring was used as a PEP 3101 format string "
+ "as Python3 bytestrings do not have a .format attribute",
+ {'minversion': (3, 0)}),
+}
+
+
+class AnsibleStringFormatChecker(BaseChecker):
+ """Checks string formatting operations to ensure that the format string
+ is valid and the arguments match the format string.
+ """
+
+ __implements__ = (IAstroidChecker,)
+ name = 'string'
+ msgs = MSGS
+
+ @check_messages(*(MSGS.keys()))
+ def visit_call(self, node):
+ func = utils.safe_infer(node.func)
+ if (isinstance(func, astroid.BoundMethod)
+ and isinstance(func.bound, astroid.Instance)
+ and func.bound.name in ('str', 'unicode', 'bytes')):
+ if func.name == 'format':
+ self._check_new_format(node, func)
+
+ def _check_new_format(self, node, func):
+ """ Check the new string formatting """
+ if (isinstance(node.func, astroid.Attribute)
+ and not isinstance(node.func.expr, astroid.Const)):
+ return
+ try:
+ strnode = next(func.bound.infer())
+ except astroid.InferenceError:
+ return
+ if not isinstance(strnode, astroid.Const):
+ return
+
+ if _PY3K and isinstance(strnode.value, six.binary_type):
+ self.add_message('ansible-no-format-on-bytestring', node=node)
+ return
+ if not isinstance(strnode.value, six.string_types):
+ return
+
+ if node.starargs or node.kwargs:
+ return
+ try:
+ num_args = parse_format_method_string(strnode.value)[1]
+ except utils.IncompleteFormatString:
+ return
+
+ if num_args:
+ self.add_message('ansible-format-automatic-specification',
+ node=node)
+ return
+
+
+def register(linter):
+ """required method to auto register this checker """
+ linter.register_checker(AnsibleStringFormatChecker(linter))
diff --git a/test/lib/ansible_test/_data/sanity/pylint/plugins/unwanted.py b/test/lib/ansible_test/_data/sanity/pylint/plugins/unwanted.py
new file mode 100644
index 00000000..7012feaa
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/pylint/plugins/unwanted.py
@@ -0,0 +1,242 @@
+"""A plugin for pylint to identify imports and functions which should not be used."""
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import os
+
+import astroid
+
+from pylint.checkers import BaseChecker
+from pylint.interfaces import IAstroidChecker
+
+ANSIBLE_TEST_MODULES_PATH = os.environ['ANSIBLE_TEST_MODULES_PATH']
+ANSIBLE_TEST_MODULE_UTILS_PATH = os.environ['ANSIBLE_TEST_MODULE_UTILS_PATH']
+
+
+class UnwantedEntry:
+ """Defines an unwanted import."""
+ def __init__(self, alternative, modules_only=False, names=None, ignore_paths=None):
+ """
+ :type alternative: str
+ :type modules_only: bool
+ :type names: tuple[str] | None
+ :type ignore_paths: tuple[str] | None
+ """
+ self.alternative = alternative
+ self.modules_only = modules_only
+ self.names = set(names) if names else set()
+ self.ignore_paths = ignore_paths
+
+ def applies_to(self, path, name=None):
+ """
+ :type path: str
+ :type name: str | None
+ :rtype: bool
+ """
+ if self.names:
+ if not name:
+ return False
+
+ if name not in self.names:
+ return False
+
+ if self.ignore_paths and any(path.endswith(ignore_path) for ignore_path in self.ignore_paths):
+ return False
+
+ if self.modules_only:
+ return is_module_path(path)
+
+ return True
+
+
+def is_module_path(path):
+ """
+ :type path: str
+ :rtype: bool
+ """
+ return path.startswith(ANSIBLE_TEST_MODULES_PATH) or path.startswith(ANSIBLE_TEST_MODULE_UTILS_PATH)
+
+
+class AnsibleUnwantedChecker(BaseChecker):
+ """Checker for unwanted imports and functions."""
+ __implements__ = (IAstroidChecker,)
+
+ name = 'unwanted'
+
+ BAD_IMPORT = 'ansible-bad-import'
+ BAD_IMPORT_FROM = 'ansible-bad-import-from'
+ BAD_FUNCTION = 'ansible-bad-function'
+ BAD_MODULE_IMPORT = 'ansible-bad-module-import'
+
+ msgs = dict(
+ E5101=('Import %s instead of %s',
+ BAD_IMPORT,
+ 'Identifies imports which should not be used.'),
+ E5102=('Import %s from %s instead of %s',
+ BAD_IMPORT_FROM,
+ 'Identifies imports which should not be used.'),
+ E5103=('Call %s instead of %s',
+ BAD_FUNCTION,
+ 'Identifies functions which should not be used.'),
+ E5104=('Import external package or ansible.module_utils not %s',
+ BAD_MODULE_IMPORT,
+ 'Identifies imports which should not be used.'),
+ )
+
+ unwanted_imports = dict(
+ # Additional imports that we may want to start checking:
+ # boto=UnwantedEntry('boto3', modules_only=True),
+ # requests=UnwantedEntry('ansible.module_utils.urls', modules_only=True),
+ # urllib=UnwantedEntry('ansible.module_utils.urls', modules_only=True),
+
+ # see https://docs.python.org/2/library/urllib2.html
+ urllib2=UnwantedEntry('ansible.module_utils.urls',
+ ignore_paths=(
+ '/lib/ansible/module_utils/urls.py',
+ )),
+
+ # see https://docs.python.org/3.7/library/collections.abc.html
+ collections=UnwantedEntry('ansible.module_utils.common._collections_compat',
+ ignore_paths=(
+ '/lib/ansible/module_utils/common/_collections_compat.py',
+ ),
+ names=(
+ 'MappingView',
+ 'ItemsView',
+ 'KeysView',
+ 'ValuesView',
+ 'Mapping', 'MutableMapping',
+ 'Sequence', 'MutableSequence',
+ 'Set', 'MutableSet',
+ 'Container',
+ 'Hashable',
+ 'Sized',
+ 'Callable',
+ 'Iterable',
+ 'Iterator',
+ )),
+ )
+
+ unwanted_functions = {
+ # see https://docs.python.org/2/library/tempfile.html#tempfile.mktemp
+ 'tempfile.mktemp': UnwantedEntry('tempfile.mkstemp'),
+
+ 'sys.exit': UnwantedEntry('exit_json or fail_json',
+ ignore_paths=(
+ '/lib/ansible/module_utils/basic.py',
+ '/lib/ansible/modules/async_wrapper.py',
+ '/lib/ansible/module_utils/common/removed.py',
+ ),
+ modules_only=True),
+
+ 'builtins.print': UnwantedEntry('module.log or module.debug',
+ ignore_paths=(
+ '/lib/ansible/module_utils/basic.py',
+ '/lib/ansible/module_utils/common/removed.py',
+ ),
+ modules_only=True),
+ }
+
+ def visit_import(self, node):
+ """
+ :type node: astroid.node_classes.Import
+ """
+ for name in node.names:
+ self._check_import(node, name[0])
+
+ def visit_importfrom(self, node):
+ """
+ :type node: astroid.node_classes.ImportFrom
+ """
+ self._check_importfrom(node, node.modname, node.names)
+
+ def visit_attribute(self, node):
+ """
+ :type node: astroid.node_classes.Attribute
+ """
+ last_child = node.last_child()
+
+ # this is faster than using type inference and will catch the most common cases
+ if not isinstance(last_child, astroid.node_classes.Name):
+ return
+
+ module = last_child.name
+
+ entry = self.unwanted_imports.get(module)
+
+ if entry and entry.names:
+ if entry.applies_to(self.linter.current_file, node.attrname):
+ self.add_message(self.BAD_IMPORT_FROM, args=(node.attrname, entry.alternative, module), node=node)
+
+ def visit_call(self, node):
+ """
+ :type node: astroid.node_classes.Call
+ """
+ try:
+ for i in node.func.inferred():
+ func = None
+
+ if isinstance(i, astroid.scoped_nodes.FunctionDef) and isinstance(i.parent, astroid.scoped_nodes.Module):
+ func = '%s.%s' % (i.parent.name, i.name)
+
+ if not func:
+ continue
+
+ entry = self.unwanted_functions.get(func)
+
+ if entry and entry.applies_to(self.linter.current_file):
+ self.add_message(self.BAD_FUNCTION, args=(entry.alternative, func), node=node)
+ except astroid.exceptions.InferenceError:
+ pass
+
+ def _check_import(self, node, modname):
+ """
+ :type node: astroid.node_classes.Import
+ :type modname: str
+ """
+ self._check_module_import(node, modname)
+
+ entry = self.unwanted_imports.get(modname)
+
+ if not entry:
+ return
+
+ if entry.applies_to(self.linter.current_file):
+ self.add_message(self.BAD_IMPORT, args=(entry.alternative, modname), node=node)
+
+ def _check_importfrom(self, node, modname, names):
+ """
+ :type node: astroid.node_classes.ImportFrom
+ :type modname: str
+        :type names: list[tuple[str, str | None]]
+ """
+ self._check_module_import(node, modname)
+
+ entry = self.unwanted_imports.get(modname)
+
+ if not entry:
+ return
+
+ for name in names:
+ if entry.applies_to(self.linter.current_file, name[0]):
+ self.add_message(self.BAD_IMPORT_FROM, args=(name[0], entry.alternative, modname), node=node)
+
+ def _check_module_import(self, node, modname):
+ """
+ :type node: astroid.node_classes.Import | astroid.node_classes.ImportFrom
+ :type modname: str
+ """
+ if not is_module_path(self.linter.current_file):
+ return
+
+ if modname == 'ansible.module_utils' or modname.startswith('ansible.module_utils.'):
+ return
+
+ if modname == 'ansible' or modname.startswith('ansible.'):
+ self.add_message(self.BAD_MODULE_IMPORT, args=(modname,), node=node)
+
+
+def register(linter):
+ """required method to auto register this checker """
+ linter.register_checker(AnsibleUnwantedChecker(linter))
diff --git a/test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt b/test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt
new file mode 100644
index 00000000..961e9bd9
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/rstcheck/ignore-substitutions.txt
@@ -0,0 +1,5 @@
+version
+release
+today
+br
+_
diff --git a/test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt b/test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt
new file mode 100644
index 00000000..29588ddd
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/shellcheck/exclude.txt
@@ -0,0 +1,3 @@
+SC1090
+SC1091
+SC2164
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/main.py b/test/lib/ansible_test/_data/sanity/validate-modules/main.py
new file mode 100755
index 00000000..c1e2bdaa
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/main.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from validate_modules.main import main
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate-modules b/test/lib/ansible_test/_data/sanity/validate-modules/validate-modules
new file mode 120000
index 00000000..11a5d8e1
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate-modules
@@ -0,0 +1 @@
+main.py \ No newline at end of file
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py
new file mode 100644
index 00000000..d8ff2dc0
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/__init__.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz <matt@sivel.net>
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+__version__ = '0.0.1b'
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py
new file mode 100644
index 00000000..e7379288
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py
@@ -0,0 +1,2444 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz <matt@sivel.net>
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import argparse
+import ast
+import datetime
+import json
+import errno
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import traceback
+
+from collections import OrderedDict
+from contextlib import contextmanager
+from distutils.version import StrictVersion, LooseVersion
+from fnmatch import fnmatch
+
+import yaml
+
+from ansible import __version__ as ansible_version
+from ansible.executor.module_common import REPLACER_WINDOWS
+from ansible.module_utils.common._collections_compat import Mapping
+from ansible.module_utils._text import to_native
+from ansible.plugins.loader import fragment_loader
+from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
+from ansible.utils.plugin_docs import BLACKLIST, add_collection_to_versions_and_dates, add_fragments, get_docstring
+from ansible.utils.version import SemanticVersion
+
+from .module_args import AnsibleModuleImportError, AnsibleModuleNotInitialized, get_argument_spec
+
+from .schema import ansible_module_kwargs_schema, doc_schema, return_schema
+
+from .utils import CaptureStd, NoArgsAnsibleModule, compare_unordered_lists, is_empty, parse_yaml, parse_isodate
+from voluptuous.humanize import humanize_error
+
+from ansible.module_utils.six import PY3, with_metaclass, string_types
+
+if PY3:
+ # Because there is no ast.TryExcept in Python 3 ast module
+ TRY_EXCEPT = ast.Try
+ # REPLACER_WINDOWS from ansible.executor.module_common is byte
+ # string but we need unicode for Python 3
+ REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8')
+else:
+ TRY_EXCEPT = ast.TryExcept
+
+BLACKLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea'))
+INDENT_REGEX = re.compile(r'([\t]*)')
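+# matches type() used in a comparison, e.g. "if type(foo) == dict:"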
+TYPE_REGEX = re.compile(r'.*(if|or)(\s+[^"\']*|\s+)(?<!_)(?<!str\()type\([^)].*')
+SYS_EXIT_REGEX = re.compile(r'[^#]*sys\.exit\s*\(.*')
+BLACKLIST_IMPORTS = {
+ 'requests': {
+ 'new_only': True,
+ 'error': {
+ 'code': 'use-module-utils-urls',
+ 'msg': ('requests import found, should use '
+ 'ansible.module_utils.urls instead')
+ }
+ },
+ r'boto(?:\.|$)': {
+ 'new_only': True,
+ 'error': {
+ 'code': 'use-boto3',
+ 'msg': 'boto import found, new modules should use boto3'
+ }
+ },
+}
+SUBPROCESS_REGEX = re.compile(r'subprocess\.Po.*')
+OS_CALL_REGEX = re.compile(r'os\.call.*')
+
+
+LOOSE_ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version.split('.')[:3]))
+
+
+def compare_dates(d1, d2):
+ try:
+ date1 = parse_isodate(d1, allow_date=True)
+ date2 = parse_isodate(d2, allow_date=True)
+ return date1 == date2
+ except ValueError:
+ # At least one of d1 and d2 cannot be parsed. Simply compare values.
+ return d1 == d2
+
+
+class ReporterEncoder(json.JSONEncoder):
+ def default(self, o):
+ if isinstance(o, Exception):
+ return str(o)
+
+ return json.JSONEncoder.default(self, o)
+
+
+class Reporter:
+ def __init__(self):
+ self.files = OrderedDict()
+
+ def _ensure_default_entry(self, path):
+ try:
+ self.files[path]
+ except KeyError:
+ self.files[path] = {
+ 'errors': [],
+ 'warnings': [],
+ 'traces': [],
+ 'warning_traces': []
+ }
+
+ def _log(self, path, code, msg, level='error', line=0, column=0):
+ self._ensure_default_entry(path)
+ lvl_dct = self.files[path]['%ss' % level]
+ lvl_dct.append({
+ 'code': code,
+ 'msg': msg,
+ 'line': line,
+ 'column': column
+ })
+
+ def error(self, *args, **kwargs):
+ self._log(*args, level='error', **kwargs)
+
+ def warning(self, *args, **kwargs):
+ self._log(*args, level='warning', **kwargs)
+
+ def trace(self, path, tracebk):
+ self._ensure_default_entry(path)
+ self.files[path]['traces'].append(tracebk)
+
+ def warning_trace(self, path, tracebk):
+ self._ensure_default_entry(path)
+ self.files[path]['warning_traces'].append(tracebk)
+
+ @staticmethod
+ @contextmanager
+ def _output_handle(output):
+ if output != '-':
+ handle = open(output, 'w+')
+ else:
+ handle = sys.stdout
+
+ yield handle
+
+ handle.flush()
+ handle.close()
+
+ @staticmethod
+ def _filter_out_ok(reports):
+ temp_reports = OrderedDict()
+ for path, report in reports.items():
+ if report['errors'] or report['warnings']:
+ temp_reports[path] = report
+
+ return temp_reports
+
+ def plain(self, warnings=False, output='-'):
+ """Print out the test results in plain format
+
+ output is ignored here for now
+ """
+ ret = []
+
+ for path, report in Reporter._filter_out_ok(self.files).items():
+ traces = report['traces'][:]
+ if warnings and report['warnings']:
+ traces.extend(report['warning_traces'])
+
+ for trace in traces:
+ print('TRACE:')
+ print('\n '.join((' %s' % trace).splitlines()))
+ for error in report['errors']:
+ error['path'] = path
+ print('%(path)s:%(line)d:%(column)d: E%(code)s %(msg)s' % error)
+ ret.append(1)
+ if warnings:
+ for warning in report['warnings']:
+ warning['path'] = path
+ print('%(path)s:%(line)d:%(column)d: W%(code)s %(msg)s' % warning)
+
+ return 3 if ret else 0
+
+ def json(self, warnings=False, output='-'):
+ """Print out the test results in json format
+
+ warnings is not respected in this output
+ """
+ ret = [len(r['errors']) for r in self.files.values()]
+
+ with Reporter._output_handle(output) as handle:
+ print(json.dumps(Reporter._filter_out_ok(self.files), indent=4, cls=ReporterEncoder), file=handle)
+
+ return 3 if sum(ret) else 0
+
+
+class Validator(with_metaclass(abc.ABCMeta, object)):
+ """Validator instances are intended to be run on a single object. if you
+ are scanning multiple objects for problems, you'll want to have a separate
+ Validator for each one."""
+
+ def __init__(self, reporter=None):
+ self.reporter = reporter
+
+ @abc.abstractproperty
+ def object_name(self):
+ """Name of the object we validated"""
+ pass
+
+ @abc.abstractproperty
+ def object_path(self):
+ """Path of the object we validated"""
+ pass
+
+ @abc.abstractmethod
+ def validate(self):
+ """Run this method to generate the test results"""
+ pass
+
+
+class ModuleValidator(Validator):
+ BLACKLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt')
+ BLACKLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml',
+ 'shippable.yml',
+ '.gitattributes', '.gitmodules', 'COPYING',
+ '__init__.py', 'VERSION', 'test-docs.sh'))
+ BLACKLIST = BLACKLIST_FILES.union(BLACKLIST['MODULE'])
+
+ PS_DOC_BLACKLIST = frozenset((
+ 'async_status.ps1',
+ 'slurp.ps1',
+ 'setup.ps1'
+ ))
+ PS_ARG_VALIDATE_BLACKLIST = frozenset((
+ 'win_dsc.ps1', # win_dsc is a dynamic arg spec, the docs won't ever match
+ ))
+
+ WHITELIST_FUTURE_IMPORTS = frozenset(('absolute_import', 'division', 'print_function'))
+
+ def __init__(self, path, analyze_arg_spec=False, collection=None, collection_version=None,
+ base_branch=None, git_cache=None, reporter=None, routing=None):
+ super(ModuleValidator, self).__init__(reporter=reporter or Reporter())
+
+ self.path = path
+ self.basename = os.path.basename(self.path)
+ self.name = os.path.splitext(self.basename)[0]
+
+ self.analyze_arg_spec = analyze_arg_spec
+
+ self._Version = LooseVersion
+ self._StrictVersion = StrictVersion
+
+ self.collection = collection
+ self.collection_name = 'ansible.builtin'
+ if self.collection:
+ self._Version = SemanticVersion
+ self._StrictVersion = SemanticVersion
+ collection_namespace_path, collection_name = os.path.split(self.collection)
+ self.collection_name = '%s.%s' % (os.path.basename(collection_namespace_path), collection_name)
+ self.routing = routing
+ self.collection_version = None
+ if collection_version is not None:
+ self.collection_version_str = collection_version
+ self.collection_version = SemanticVersion(collection_version)
+
+ self.base_branch = base_branch
+ self.git_cache = git_cache or GitCache()
+
+ self._python_module_override = False
+
+ with open(path) as f:
+ self.text = f.read()
+ self.length = len(self.text.splitlines())
+ try:
+ self.ast = ast.parse(self.text)
+ except Exception:
+ self.ast = None
+
+ if base_branch:
+ self.base_module = self._get_base_file()
+ else:
+ self.base_module = None
+
+ def _create_version(self, v, collection_name=None):
+ if not v:
+ raise ValueError('Empty string is not a valid version')
+ if collection_name == 'ansible.builtin':
+ return LooseVersion(v)
+ if collection_name is not None:
+ return SemanticVersion(v)
+ return self._Version(v)
+
+ def _create_strict_version(self, v, collection_name=None):
+ if not v:
+ raise ValueError('Empty string is not a valid version')
+ if collection_name == 'ansible.builtin':
+ return StrictVersion(v)
+ if collection_name is not None:
+ return SemanticVersion(v)
+ return self._StrictVersion(v)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if not self.base_module:
+ return
+
+ try:
+ os.remove(self.base_module)
+ except Exception:
+ pass
+
+ @property
+ def object_name(self):
+ return self.basename
+
+ @property
+ def object_path(self):
+ return self.path
+
+ def _get_collection_meta(self):
+ """Implement if we need this for version_added comparisons
+ """
+ pass
+
+ def _python_module(self):
+ if self.path.endswith('.py') or self._python_module_override:
+ return True
+ return False
+
+ def _powershell_module(self):
+ if self.path.endswith('.ps1'):
+ return True
+ return False
+
+ def _just_docs(self):
+ """Module can contain just docs and from __future__ boilerplate
+ """
+ try:
+ for child in self.ast.body:
+ if not isinstance(child, ast.Assign):
+ # allowed from __future__ imports
+ if isinstance(child, ast.ImportFrom) and child.module == '__future__':
+ for future_import in child.names:
+ if future_import.name not in self.WHITELIST_FUTURE_IMPORTS:
+ break
+ else:
+ continue
+ return False
+ return True
+ except AttributeError:
+ return False
+
+ def _get_base_branch_module_path(self):
+ """List all paths within lib/ansible/modules to try and match a moved module"""
+ return self.git_cache.base_module_paths.get(self.object_name)
+
+ def _has_alias(self):
+ """Return true if the module has any aliases."""
+ return self.object_name in self.git_cache.head_aliased_modules
+
+ def _get_base_file(self):
+ # In case of module moves, look for the original location
+ base_path = self._get_base_branch_module_path()
+
+ command = ['git', 'show', '%s:%s' % (self.base_branch, base_path or self.path)]
+ p = subprocess.Popen(command, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if int(p.returncode) != 0:
+ return None
+
+ t = tempfile.NamedTemporaryFile(delete=False)
+ t.write(stdout)
+ t.close()
+
+ return t.name
+
+ def _is_new_module(self):
+ if self._has_alias():
+ return False
+
+ return not self.object_name.startswith('_') and bool(self.base_branch) and not bool(self.base_module)
+
+ def _check_interpreter(self, powershell=False):
+ if powershell:
+ if not self.text.startswith('#!powershell\n'):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-powershell-interpreter',
+ msg='Interpreter line is not "#!powershell"'
+ )
+ return
+
+ if not self.text.startswith('#!/usr/bin/python'):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-python-interpreter',
+ msg='Interpreter line is not "#!/usr/bin/python"',
+ )
+
+ def _check_type_instead_of_isinstance(self, powershell=False):
+ if powershell:
+ return
+ for line_no, line in enumerate(self.text.splitlines()):
+ typekeyword = TYPE_REGEX.match(line)
+ if typekeyword:
+ # TODO: add column
+ self.reporter.error(
+ path=self.object_path,
+ code='unidiomatic-typecheck',
+ msg=('Type comparison using type() found. '
+ 'Use isinstance() instead'),
+ line=line_no + 1
+ )
+
+ def _check_for_sys_exit(self):
+ # Optimize out the happy path
+ if 'sys.exit' not in self.text:
+ return
+
+ for line_no, line in enumerate(self.text.splitlines()):
+ sys_exit_usage = SYS_EXIT_REGEX.match(line)
+ if sys_exit_usage:
+ # TODO: add column
+ self.reporter.error(
+ path=self.object_path,
+ code='use-fail-json-not-sys-exit',
+ msg='sys.exit() call found. Should be exit_json/fail_json',
+ line=line_no + 1
+ )
+
+ def _check_gpl3_header(self):
+ header = '\n'.join(self.text.split('\n')[:20])
+ if ('GNU General Public License' not in header or
+ ('version 3' not in header and 'v3.0' not in header)):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-gplv3-license',
+ msg='GPLv3 license header not found in the first 20 lines of the module'
+ )
+ elif self._is_new_module():
+ if len([line for line in header
+ if 'GNU General Public License' in line]) > 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='use-short-gplv3-license',
+ msg='Found old style GPLv3 license header: '
+ 'https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#copyright'
+ )
+
+ def _check_for_subprocess(self):
+ for child in self.ast.body:
+ if isinstance(child, ast.Import):
+ if child.names[0].name == 'subprocess':
+ for line_no, line in enumerate(self.text.splitlines()):
+ sp_match = SUBPROCESS_REGEX.search(line)
+ if sp_match:
+ self.reporter.error(
+ path=self.object_path,
+ code='use-run-command-not-popen',
+ msg=('subprocess.Popen call found. Should be module.run_command'),
+ line=(line_no + 1),
+ column=(sp_match.span()[0] + 1)
+ )
+
+ def _check_for_os_call(self):
+ if 'os.call' in self.text:
+ for line_no, line in enumerate(self.text.splitlines()):
+ os_call_match = OS_CALL_REGEX.search(line)
+ if os_call_match:
+ self.reporter.error(
+ path=self.object_path,
+ code='use-run-command-not-os-call',
+ msg=('os.call() call found. Should be module.run_command'),
+ line=(line_no + 1),
+ column=(os_call_match.span()[0] + 1)
+ )
+
+ def _find_blacklist_imports(self):
+ for child in self.ast.body:
+ names = []
+ if isinstance(child, ast.Import):
+ names.extend(child.names)
+ elif isinstance(child, TRY_EXCEPT):
+            bodies = list(child.body)  # copy so we do not mutate the AST node
+ for handler in child.handlers:
+ bodies.extend(handler.body)
+ for grandchild in bodies:
+ if isinstance(grandchild, ast.Import):
+ names.extend(grandchild.names)
+ for name in names:
+ # TODO: Add line/col
+ for blacklist_import, options in BLACKLIST_IMPORTS.items():
+ if re.search(blacklist_import, name.name):
+ new_only = options['new_only']
+ if self._is_new_module() and new_only:
+ self.reporter.error(
+ path=self.object_path,
+ **options['error']
+ )
+ elif not new_only:
+ self.reporter.error(
+ path=self.object_path,
+ **options['error']
+ )
+
+ def _find_module_utils(self, main):
+ linenos = []
+ found_basic = False
+ for child in self.ast.body:
+ if isinstance(child, (ast.Import, ast.ImportFrom)):
+ names = []
+ try:
+ names.append(child.module)
+ if child.module.endswith('.basic'):
+ found_basic = True
+ except AttributeError:
+ pass
+ names.extend([n.name for n in child.names])
+
+ if [n for n in names if n.startswith('ansible.module_utils')]:
+ linenos.append(child.lineno)
+
+ for name in child.names:
+ if ('module_utils' in getattr(child, 'module', '') and
+ isinstance(name, ast.alias) and
+ name.name == '*'):
+ msg = (
+ 'module-utils-specific-import',
+ ('module_utils imports should import specific '
+ 'components, not "*"')
+ )
+ if self._is_new_module():
+ self.reporter.error(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=child.lineno
+ )
+ else:
+ self.reporter.warning(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=child.lineno
+ )
+
+ if (isinstance(name, ast.alias) and
+ name.name == 'basic'):
+ found_basic = True
+
+ if not found_basic:
+ self.reporter.warning(
+ path=self.object_path,
+ code='missing-module-utils-basic-import',
+ msg='Did not find "ansible.module_utils.basic" import'
+ )
+
+ return linenos
+
+ def _get_first_callable(self):
+ linenos = []
+ for child in self.ast.body:
+ if isinstance(child, (ast.FunctionDef, ast.ClassDef)):
+ linenos.append(child.lineno)
+
+ return min(linenos)
+
+ def _find_main_call(self, look_for="main"):
+ """ Ensure that the module ends with:
+ if __name__ == '__main__':
+ main()
+ OR, in the case of modules that are in the docs-only deprecation phase
+ if __name__ == '__main__':
+ removed_module()
+ """
+ lineno = False
+ if_bodies = []
+ for child in self.ast.body:
+ if isinstance(child, ast.If):
+ try:
+ if child.test.left.id == '__name__':
+ if_bodies.extend(child.body)
+ except AttributeError:
+ pass
+
+ bodies = self.ast.body
+ bodies.extend(if_bodies)
+
+ for child in bodies:
+
+ # validate that the next to last line is 'if __name__ == "__main__"'
+ if child.lineno == (self.length - 1):
+
+ mainchecked = False
+ try:
+ if isinstance(child, ast.If) and \
+ child.test.left.id == '__name__' and \
+ len(child.test.ops) == 1 and \
+ isinstance(child.test.ops[0], ast.Eq) and \
+ child.test.comparators[0].s == '__main__':
+ mainchecked = True
+ except Exception:
+ pass
+
+ if not mainchecked:
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-if-name-main',
+ msg='Next to last line should be: if __name__ == "__main__":',
+ line=child.lineno
+ )
+
+ # validate that the final line is a call to main()
+ if isinstance(child, ast.Expr):
+ if isinstance(child.value, ast.Call):
+ if (isinstance(child.value.func, ast.Name) and
+ child.value.func.id == look_for):
+ lineno = child.lineno
+ if lineno < self.length - 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='last-line-main-call',
+ msg=('Call to %s() not the last line' % look_for),
+ line=lineno
+ )
+
+ if not lineno:
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-main-call',
+ msg=('Did not find a call to %s()' % look_for)
+ )
+
+ return lineno or 0
+
+ def _find_has_import(self):
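+        # Looks for the conventional optional-import pattern (illustrative):
+        #     try:
+        #         import foo
+        #         HAS_FOO = True
+        #     except ImportError:
+        #         HAS_FOO = False
+        # and warns when a try/except import has no HAS_* assignment.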
+ for child in self.ast.body:
+ found_try_except_import = False
+ found_has = False
+ if isinstance(child, TRY_EXCEPT):
+ bodies = child.body
+ for handler in child.handlers:
+ bodies.extend(handler.body)
+ for grandchild in bodies:
+ if isinstance(grandchild, ast.Import):
+ found_try_except_import = True
+ if isinstance(grandchild, ast.Assign):
+ for target in grandchild.targets:
+ if not isinstance(target, ast.Name):
+ continue
+ if target.id.lower().startswith('has_'):
+ found_has = True
+ if found_try_except_import and not found_has:
+ # TODO: Add line/col
+ self.reporter.warning(
+ path=self.object_path,
+ code='try-except-missing-has',
+ msg='Found Try/Except block without HAS_ assignment'
+ )
+
+ def _ensure_imports_below_docs(self, doc_info, first_callable):
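+        # Enforces the expected module layout: apart from whitelisted
+        # __future__ imports, all imports must appear after the
+        # DOCUMENTATION/EXAMPLES/RETURN assignments and before the first
+        # function or class definition.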
+ try:
+ min_doc_line = min(
+ [doc_info[key]['lineno'] for key in doc_info if doc_info[key]['lineno']]
+ )
+ except ValueError:
+ # We can't perform this validation, as there are no DOCs provided at all
+ return
+
+ max_doc_line = max(
+ [doc_info[key]['end_lineno'] for key in doc_info if doc_info[key]['end_lineno']]
+ )
+
+ import_lines = []
+
+ for child in self.ast.body:
+ if isinstance(child, (ast.Import, ast.ImportFrom)):
+ if isinstance(child, ast.ImportFrom) and child.module == '__future__':
+ # allowed from __future__ imports
+ for future_import in child.names:
+ if future_import.name not in self.WHITELIST_FUTURE_IMPORTS:
+ self.reporter.error(
+ path=self.object_path,
+ code='illegal-future-imports',
+ msg=('Only the following from __future__ imports are allowed: %s'
+ % ', '.join(self.WHITELIST_FUTURE_IMPORTS)),
+ line=child.lineno
+ )
+ break
+                    else:  # for-else: no break means no import problem was found, so this is a legal import
+ continue
+ import_lines.append(child.lineno)
+ if child.lineno < min_doc_line:
+ self.reporter.error(
+ path=self.object_path,
+ code='import-before-documentation',
+ msg=('Import found before documentation variables. '
+ 'All imports must appear below '
+ 'DOCUMENTATION/EXAMPLES/RETURN.'),
+ line=child.lineno
+ )
+ break
+ elif isinstance(child, TRY_EXCEPT):
+ bodies = child.body
+ for handler in child.handlers:
+ bodies.extend(handler.body)
+ for grandchild in bodies:
+ if isinstance(grandchild, (ast.Import, ast.ImportFrom)):
+ import_lines.append(grandchild.lineno)
+ if grandchild.lineno < min_doc_line:
+ self.reporter.error(
+ path=self.object_path,
+ code='import-before-documentation',
+ msg=('Import found before documentation '
+ 'variables. All imports must appear below '
+ 'DOCUMENTATION/EXAMPLES/RETURN.'),
+ line=child.lineno
+ )
+ break
+
+ for import_line in import_lines:
+ if not (max_doc_line < import_line < first_callable):
+ msg = (
+ 'import-placement',
+ ('Imports should be directly below DOCUMENTATION/EXAMPLES/'
+ 'RETURN.')
+ )
+ if self._is_new_module():
+ self.reporter.error(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=import_line
+ )
+ else:
+ self.reporter.warning(
+ path=self.object_path,
+ code=msg[0],
+ msg=msg[1],
+ line=import_line
+ )
+
+ def _validate_ps_replacers(self):
+ # loop all (for/else + error)
+ # get module list for each
+ # check "shape" of each module name
+
+ module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'
+ csharp_requires = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*(Ansible\..+)'
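+        # Example lines these patterns are intended to match (illustrative):
+        #     #Requires -Module Ansible.ModuleUtils.Legacy
+        #     #AnsibleRequires -CSharpUtil Ansible.Basic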
+ found_requires = False
+
+ for req_stmt in re.finditer(module_requires, self.text):
+ found_requires = True
+ # this will bomb on dictionary format - "don't do that"
+ module_list = [x.strip() for x in req_stmt.group(1).split(',')]
+ if len(module_list) > 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='multiple-utils-per-requires',
+ msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0)
+ )
+ continue
+
+ module_name = module_list[0]
+
+ if module_name.lower().endswith('.psm1'):
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-requires-extension',
+ msg='Module #Requires should not end in .psm1: "%s"' % module_name
+ )
+
+ for req_stmt in re.finditer(csharp_requires, self.text):
+ found_requires = True
+ # this will bomb on dictionary format - "don't do that"
+ module_list = [x.strip() for x in req_stmt.group(1).split(',')]
+ if len(module_list) > 1:
+ self.reporter.error(
+ path=self.object_path,
+ code='multiple-csharp-utils-per-requires',
+ msg='Ansible C# util requirements do not support multiple utils per statement: "%s"' % req_stmt.group(0)
+ )
+ continue
+
+ module_name = module_list[0]
+
+ if module_name.lower().endswith('.cs'):
+ self.reporter.error(
+ path=self.object_path,
+ code='illegal-extension-cs',
+ msg='Module #AnsibleRequires -CSharpUtil should not end in .cs: "%s"' % module_name
+ )
+
+ # also accept the legacy #POWERSHELL_COMMON replacer signal
+ if not found_requires and REPLACER_WINDOWS not in self.text:
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-module-utils-import-csharp-requirements',
+ msg='No Ansible.ModuleUtils or C# Ansible util requirements/imports found'
+ )
+
+ def _find_ps_docs_py_file(self):
+ if self.object_name in self.PS_DOC_BLACKLIST:
+ return
+ py_path = self.path.replace('.ps1', '.py')
+ if not os.path.isfile(py_path):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-python-doc',
+ msg='Missing python documentation file'
+ )
+ return py_path
+
+ def _get_docs(self):
+ docs = {
+ 'DOCUMENTATION': {
+ 'value': None,
+ 'lineno': 0,
+ 'end_lineno': 0,
+ },
+ 'EXAMPLES': {
+ 'value': None,
+ 'lineno': 0,
+ 'end_lineno': 0,
+ },
+ 'RETURN': {
+ 'value': None,
+ 'lineno': 0,
+ 'end_lineno': 0,
+ },
+ }
+ for child in self.ast.body:
+ if isinstance(child, ast.Assign):
+ for grandchild in child.targets:
+ if not isinstance(grandchild, ast.Name):
+ continue
+
+ if grandchild.id == 'DOCUMENTATION':
+ docs['DOCUMENTATION']['value'] = child.value.s
+ docs['DOCUMENTATION']['lineno'] = child.lineno
+ docs['DOCUMENTATION']['end_lineno'] = (
+ child.lineno + len(child.value.s.splitlines())
+ )
+ elif grandchild.id == 'EXAMPLES':
+ docs['EXAMPLES']['value'] = child.value.s
+ docs['EXAMPLES']['lineno'] = child.lineno
+ docs['EXAMPLES']['end_lineno'] = (
+ child.lineno + len(child.value.s.splitlines())
+ )
+ elif grandchild.id == 'RETURN':
+ docs['RETURN']['value'] = child.value.s
+ docs['RETURN']['lineno'] = child.lineno
+ docs['RETURN']['end_lineno'] = (
+ child.lineno + len(child.value.s.splitlines())
+ )
+
+ return docs
+
+ def _validate_docs_schema(self, doc, schema, name, error_code):
+ # TODO: Add line/col
+ errors = []
+ try:
+ schema(doc)
+ except Exception as e:
+ for error in e.errors:
+ error.data = doc
+ errors.extend(e.errors)
+
+ for error in errors:
+ path = [str(p) for p in error.path]
+
+ local_error_code = getattr(error, 'ansible_error_code', error_code)
+
+ if isinstance(error.data, dict):
+ error_message = humanize_error(error.data, error)
+ else:
+ error_message = error
+
+ if path:
+ combined_path = '%s.%s' % (name, '.'.join(path))
+ else:
+ combined_path = name
+
+ self.reporter.error(
+ path=self.object_path,
+ code=local_error_code,
+ msg='%s: %s' % (combined_path, error_message)
+ )
+
+ def _validate_docs(self):
+ doc_info = self._get_docs()
+ doc = None
+ documentation_exists = False
+ examples_exist = False
+ returns_exist = False
+ # We have three ways of marking deprecated/removed files. Have to check each one
+ # individually and then make sure they all agree
+ filename_deprecated_or_removed = False
+ deprecated = False
+ removed = False
+ doc_deprecated = None # doc legally might not exist
+ routing_says_deprecated = False
+
+ if self.object_name.startswith('_') and not os.path.islink(self.object_path):
+ filename_deprecated_or_removed = True
+
+ # We are testing a collection
+ if self.routing:
+ routing_deprecation = self.routing.get('plugin_routing', {}).get('modules', {}).get(self.name, {}).get('deprecation', {})
+ if routing_deprecation:
+ # meta/runtime.yml says this is deprecated
+ routing_says_deprecated = True
+ deprecated = True
+
+ if not removed:
+ if not bool(doc_info['DOCUMENTATION']['value']):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-documentation',
+ msg='No DOCUMENTATION provided'
+ )
+ else:
+ documentation_exists = True
+ doc, errors, traces = parse_yaml(
+ doc_info['DOCUMENTATION']['value'],
+ doc_info['DOCUMENTATION']['lineno'],
+ self.name, 'DOCUMENTATION'
+ )
+ if doc:
+ add_collection_to_versions_and_dates(doc, self.collection_name, is_module=True)
+ for error in errors:
+ self.reporter.error(
+ path=self.object_path,
+ code='documentation-syntax-error',
+ **error
+ )
+ for trace in traces:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=trace
+ )
+ if not errors and not traces:
+ missing_fragment = False
+ with CaptureStd():
+ try:
+ get_docstring(self.path, fragment_loader, verbose=True,
+ collection_name=self.collection_name, is_module=True)
+ except AssertionError:
+ fragment = doc['extends_documentation_fragment']
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-doc-fragment',
+ msg='DOCUMENTATION fragment missing: %s' % fragment
+ )
+ missing_fragment = True
+ except Exception as e:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=traceback.format_exc()
+ )
+ self.reporter.error(
+ path=self.object_path,
+ code='documentation-error',
+ msg='Unknown DOCUMENTATION error, see TRACE: %s' % e
+ )
+
+ if not missing_fragment:
+ add_fragments(doc, self.object_path, fragment_loader=fragment_loader, is_module=True)
+
+ if 'options' in doc and doc['options'] is None:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-documentation-options',
+ msg='DOCUMENTATION.options must be a dictionary/hash when used',
+ )
+
+ if 'deprecated' in doc and doc.get('deprecated'):
+ doc_deprecated = True
+ doc_deprecation = doc['deprecated']
+ documentation_collection = doc_deprecation.get('removed_from_collection')
+ if documentation_collection != self.collection_name:
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-wrong-collection',
+ msg='"DOCUMENTATION.deprecation.removed_from_collection must be the current collection name: %r vs. %r' % (
+ documentation_collection, self.collection_name)
+ )
+ else:
+ doc_deprecated = False
+
+ if os.path.islink(self.object_path):
+ # This module has an alias, which we can tell as it's a symlink
+ # Rather than checking for `module: $filename` we need to check against the true filename
+ self._validate_docs_schema(
+ doc,
+ doc_schema(
+ os.readlink(self.object_path).split('.')[0],
+ for_collection=bool(self.collection),
+ deprecated_module=deprecated,
+ ),
+ 'DOCUMENTATION',
+ 'invalid-documentation',
+ )
+ else:
+ # This is the normal case
+ self._validate_docs_schema(
+ doc,
+ doc_schema(
+ self.object_name.split('.')[0],
+ for_collection=bool(self.collection),
+ deprecated_module=deprecated,
+ ),
+ 'DOCUMENTATION',
+ 'invalid-documentation',
+ )
+
+ if not self.collection:
+ existing_doc = self._check_for_new_args(doc)
+ self._check_version_added(doc, existing_doc)
+
+ if not bool(doc_info['EXAMPLES']['value']):
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-examples',
+ msg='No EXAMPLES provided'
+ )
+ else:
+ _doc, errors, traces = parse_yaml(doc_info['EXAMPLES']['value'],
+ doc_info['EXAMPLES']['lineno'],
+ self.name, 'EXAMPLES', load_all=True)
+ for error in errors:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-examples',
+ **error
+ )
+ for trace in traces:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=trace
+ )
+
+ if not bool(doc_info['RETURN']['value']):
+ if self._is_new_module():
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-return',
+ msg='No RETURN provided'
+ )
+ else:
+ self.reporter.warning(
+ path=self.object_path,
+ code='missing-return-legacy',
+ msg='No RETURN provided'
+ )
+ else:
+ data, errors, traces = parse_yaml(doc_info['RETURN']['value'],
+ doc_info['RETURN']['lineno'],
+ self.name, 'RETURN')
+ if data:
+ add_collection_to_versions_and_dates(data, self.collection_name, is_module=True, return_docs=True)
+ self._validate_docs_schema(data, return_schema(for_collection=bool(self.collection)),
+ 'RETURN', 'return-syntax-error')
+
+ for error in errors:
+ self.reporter.error(
+ path=self.object_path,
+ code='return-syntax-error',
+ **error
+ )
+ for trace in traces:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=trace
+ )
+
+ # Check for mismatched deprecation
+ if not self.collection:
+ mismatched_deprecation = True
+ if not (filename_deprecated_or_removed or removed or deprecated or doc_deprecated):
+ mismatched_deprecation = False
+ else:
+ if (filename_deprecated_or_removed and deprecated and doc_deprecated):
+ mismatched_deprecation = False
+ if (filename_deprecated_or_removed and removed and not (documentation_exists or examples_exist or returns_exist)):
+ mismatched_deprecation = False
+
+ if mismatched_deprecation:
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+                    msg='Module deprecation/removal must agree in documentation: prepend the filename with'
+                        ' "_" and set DOCUMENTATION.deprecated for deprecation, or remove all'
+                        ' documentation for removal'
+ )
+ else:
+ # We are testing a collection
+ if self.object_name.startswith('_'):
+ self.reporter.error(
+ path=self.object_path,
+ code='collections-no-underscore-on-deprecation',
+ msg='Deprecated content in collections MUST NOT start with "_", update meta/runtime.yml instead',
+ )
+
+ if not (doc_deprecated == routing_says_deprecated):
+ # DOCUMENTATION.deprecated and meta/runtime.yml disagree
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.'
+ )
+ elif routing_says_deprecated:
+ # Both DOCUMENTATION.deprecated and meta/runtime.yml agree that the module is deprecated.
+ # Make sure they give the same version or date.
+ routing_date = routing_deprecation.get('removal_date')
+ routing_version = routing_deprecation.get('removal_version')
+ # The versions and dates in the module documentation are auto-tagged, so remove the tag
+ # to make comparison possible and to avoid confusing the user.
+ documentation_date = doc_deprecation.get('removed_at_date')
+ documentation_version = doc_deprecation.get('removed_in')
+ if not compare_dates(routing_date, documentation_date):
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal date: %r vs. %r' % (
+ routing_date, documentation_date)
+ )
+ if routing_version != documentation_version:
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecation-mismatch',
+ msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal version: %r vs. %r' % (
+ routing_version, documentation_version)
+ )
+
+ # In the future we should error if ANSIBLE_METADATA exists in a collection
+
+ return doc_info, doc
+
+ def _check_version_added(self, doc, existing_doc):
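+        # version_added must parse as a valid version; for an existing module
+        # it must not differ from the base branch documentation, and for a new
+        # module it must match the current major.minor Ansible version.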
+ version_added_raw = doc.get('version_added')
+ try:
+ collection_name = doc.get('version_added_collection')
+ version_added = self._create_strict_version(
+ str(version_added_raw or '0.0'),
+ collection_name=collection_name)
+ except ValueError as e:
+ version_added = version_added_raw or '0.0'
+ if self._is_new_module() or version_added != 'historical':
+ # already reported during schema validation, except:
+ if version_added == 'historical':
+ self.reporter.error(
+ path=self.object_path,
+ code='module-invalid-version-added',
+ msg='version_added is not a valid version number: %r. Error: %s' % (version_added, e)
+ )
+ return
+
+ if existing_doc and str(version_added_raw) != str(existing_doc.get('version_added')):
+ self.reporter.error(
+ path=self.object_path,
+ code='module-incorrect-version-added',
+ msg='version_added should be %r. Currently %r' % (existing_doc.get('version_added'), version_added_raw)
+ )
+
+ if not self._is_new_module():
+ return
+
+ should_be = '.'.join(ansible_version.split('.')[:2])
+ strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
+
+ if (version_added < strict_ansible_version or
+ strict_ansible_version < version_added):
+ self.reporter.error(
+ path=self.object_path,
+ code='module-incorrect-version-added',
+ msg='version_added should be %r. Currently %r' % (should_be, version_added_raw)
+ )
+
+ def _validate_ansible_module_call(self, docs):
+ try:
+ spec, args, kwargs = get_argument_spec(self.path, self.collection)
+ except AnsibleModuleNotInitialized:
+ self.reporter.error(
+ path=self.object_path,
+ code='ansible-module-not-initialized',
+ msg="Execution of the module did not result in initialization of AnsibleModule",
+ )
+ return
+ except AnsibleModuleImportError as e:
+ self.reporter.error(
+ path=self.object_path,
+ code='import-error',
+ msg="Exception attempting to import module for argument_spec introspection, '%s'" % e
+ )
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=traceback.format_exc()
+ )
+ return
+
+ self._validate_docs_schema(kwargs, ansible_module_kwargs_schema(for_collection=bool(self.collection)),
+ 'AnsibleModule', 'invalid-ansiblemodule-schema')
+
+ self._validate_argument_spec(docs, spec, kwargs)
+
+ def _validate_list_of_module_args(self, name, terms, spec, context):
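+        # 'terms' is the value of mutually_exclusive/required_together/
+        # required_one_of: a list of lists (or tuples) of option names, each of
+        # which must be a known, non-repeated member of the argument_spec.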
+ if terms is None:
+ return
+ if not isinstance(terms, (list, tuple)):
+ # This is already reported by schema checking
+ return
+ for check in terms:
+ if not isinstance(check, (list, tuple)):
+ # This is already reported by schema checking
+ continue
+ bad_term = False
+ for term in check:
+ if not isinstance(term, string_types):
+ msg = name
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must contain strings in the lists or tuples; found value %r" % (term, )
+ self.reporter.error(
+ path=self.object_path,
+ code=name + '-type',
+ msg=msg,
+ )
+ bad_term = True
+ if bad_term:
+ continue
+ if len(set(check)) != len(check):
+ msg = name
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has repeated terms"
+ self.reporter.error(
+ path=self.object_path,
+ code=name + '-collision',
+ msg=msg,
+ )
+ if not set(check) <= set(spec):
+ msg = name
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(check).difference(set(spec))))
+ self.reporter.error(
+ path=self.object_path,
+ code=name + '-unknown',
+ msg=msg,
+ )
+
+ def _validate_required_if(self, terms, spec, context, module):
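+        # Each required_if entry is (key, value, requirements[, is_one_of]):
+        # when option 'key' is set to 'value', the listed requirements apply.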
+ if terms is None:
+ return
+ if not isinstance(terms, (list, tuple)):
+ # This is already reported by schema checking
+ return
+ for check in terms:
+ if not isinstance(check, (list, tuple)) or len(check) not in [3, 4]:
+ # This is already reported by schema checking
+ continue
+ if len(check) == 4 and not isinstance(check[3], bool):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have forth value omitted or of type bool; got %r" % (check[3], )
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-is_one_of-type',
+ msg=msg,
+ )
+ requirements = check[2]
+ if not isinstance(requirements, (list, tuple)):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have third value (requirements) being a list or tuple; got type %r" % (requirements, )
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-type',
+ msg=msg,
+ )
+ continue
+ bad_term = False
+ for term in requirements:
+ if not isinstance(term, string_types):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have only strings in third value (requirements); got %r" % (term, )
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-type',
+ msg=msg,
+ )
+ bad_term = True
+ if bad_term:
+ continue
+ if len(set(requirements)) != len(requirements):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has repeated terms in requirements"
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-collision',
+ msg=msg,
+ )
+ if not set(requirements) <= set(spec):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains terms in requirements which are not part of argument_spec: %s" % ", ".join(sorted(set(requirements).difference(set(spec))))
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-requirements-unknown',
+ msg=msg,
+ )
+ key = check[0]
+ if key not in spec:
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must have its key %s in argument_spec" % key
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-unknown-key',
+ msg=msg,
+ )
+ continue
+ if key in requirements:
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains its key %s in requirements" % key
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-key-in-requirements',
+ msg=msg,
+ )
+ value = check[1]
+ if value is not None:
+ _type = spec[key].get('type', 'str')
+ if callable(_type):
+ _type_checker = _type
+ else:
+ _type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type)
+ try:
+ with CaptureStd():
+ dummy = _type_checker(value)
+ except (Exception, SystemExit):
+ msg = "required_if"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has value %r which does not fit to %s's parameter type %r" % (value, key, _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='required_if-value-type',
+ msg=msg,
+ )
+
+ def _validate_required_by(self, terms, spec, context):
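+        # required_by maps an option name to the option(s) that must also be
+        # supplied alongside it; all names involved must exist in argument_spec.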
+ if terms is None:
+ return
+ if not isinstance(terms, Mapping):
+ # This is already reported by schema checking
+ return
+ for key, value in terms.items():
+ if isinstance(value, string_types):
+ value = [value]
+ if not isinstance(value, (list, tuple)):
+ # This is already reported by schema checking
+ continue
+ for term in value:
+ if not isinstance(term, string_types):
+ # This is already reported by schema checking
+ continue
+ if len(set(value)) != len(value) or key in value:
+ msg = "required_by"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has repeated terms"
+ self.reporter.error(
+ path=self.object_path,
+ code='required_by-collision',
+ msg=msg,
+ )
+ if not set(value) <= set(spec) or key not in spec:
+ msg = "required_by"
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(value).difference(set(spec))))
+ self.reporter.error(
+ path=self.object_path,
+ code='required_by-unknown',
+ msg=msg,
+ )
+
+ def _validate_argument_spec(self, docs, spec, kwargs, context=None, last_context_spec=None):
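+        # Cross-checks the module's actual argument_spec (and AnsibleModule
+        # kwargs such as mutually_exclusive/required_if) against DOCUMENTATION:
+        # defaults, types, elements, choices, required flags and suboptions
+        # must agree, and every parameter must be documented (and vice versa).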
+ if not self.analyze_arg_spec:
+ return
+
+ if docs is None:
+ docs = {}
+
+ if context is None:
+ context = []
+
+ if last_context_spec is None:
+ last_context_spec = kwargs
+
+ try:
+ if not context:
+ add_fragments(docs, self.object_path, fragment_loader=fragment_loader, is_module=True)
+ except Exception:
+ # Cannot merge fragments
+ return
+
+ # Use this to access type checkers later
+ module = NoArgsAnsibleModule({})
+
+ self._validate_list_of_module_args('mutually_exclusive', last_context_spec.get('mutually_exclusive'), spec, context)
+ self._validate_list_of_module_args('required_together', last_context_spec.get('required_together'), spec, context)
+ self._validate_list_of_module_args('required_one_of', last_context_spec.get('required_one_of'), spec, context)
+ self._validate_required_if(last_context_spec.get('required_if'), spec, context, module)
+ self._validate_required_by(last_context_spec.get('required_by'), spec, context)
+
+ provider_args = set()
+ args_from_argspec = set()
+ deprecated_args_from_argspec = set()
+ doc_options = docs.get('options', {})
+ if doc_options is None:
+ doc_options = {}
+ for arg, data in spec.items():
+ restricted_argument_names = ('message', 'syslog_facility')
+ if arg.lower() in restricted_argument_names:
+ msg = "Argument '%s' in argument_spec " % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += "must not be one of %s as it is used " \
+ "internally by Ansible Core Engine" % (",".join(restricted_argument_names))
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-name',
+ msg=msg,
+ )
+ continue
+ if 'aliases' in data:
+ for al in data['aliases']:
+ if al.lower() in restricted_argument_names:
+ msg = "Argument alias '%s' in argument_spec " % al
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += "must not be one of %s as it is used " \
+ "internally by Ansible Core Engine" % (",".join(restricted_argument_names))
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-name',
+ msg=msg,
+ )
+ continue
+
+ if not isinstance(data, dict):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " must be a dictionary/hash when used"
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-spec',
+ msg=msg,
+ )
+ continue
+
+ removed_at_date = data.get('removed_at_date', None)
+ if removed_at_date is not None:
+ try:
+ if parse_isodate(removed_at_date, allow_date=False) < datetime.date.today():
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has a removed_at_date '%s' before today" % removed_at_date
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecated-date',
+ msg=msg,
+ )
+ except ValueError:
+ # This should only happen when removed_at_date is not in ISO format. Since schema
+ # validation already reported this as an error, don't report it a second time.
+ pass
+
+ deprecated_aliases = data.get('deprecated_aliases', None)
+ if deprecated_aliases is not None:
+ for deprecated_alias in deprecated_aliases:
+ if 'name' in deprecated_alias and 'date' in deprecated_alias:
+ try:
+ date = deprecated_alias['date']
+ if parse_isodate(date, allow_date=False) < datetime.date.today():
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with removal date '%s' before today" % (
+ deprecated_alias['name'], deprecated_alias['date'])
+ self.reporter.error(
+ path=self.object_path,
+ code='deprecated-date',
+ msg=msg,
+ )
+ except ValueError:
+ # This should only happen when deprecated_alias['date'] is not in ISO format. Since
+ # schema validation already reported this as an error, don't report it a second
+ # time.
+ pass
+
+ has_version = False
+ if self.collection and self.collection_version is not None:
+ compare_version = self.collection_version
+ version_of_what = "this collection (%s)" % self.collection_version_str
+ code_prefix = 'collection'
+ has_version = True
+ elif not self.collection:
+ compare_version = LOOSE_ANSIBLE_VERSION
+ version_of_what = "Ansible (%s)" % ansible_version
+ code_prefix = 'ansible'
+ has_version = True
+
+ removed_in_version = data.get('removed_in_version', None)
+ if removed_in_version is not None:
+ try:
+ collection_name = data.get('removed_from_collection')
+ removed_in = self._create_version(str(removed_in_version), collection_name=collection_name)
+ if has_version and collection_name == self.collection_name and compare_version >= removed_in:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has a deprecated removed_in_version %r," % removed_in_version
+ msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code=code_prefix + '-deprecated-version',
+ msg=msg,
+ )
+ except ValueError as e:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has an invalid removed_in_version number %r: %s" % (removed_in_version, e)
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+ except TypeError:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has an invalid removed_in_version number %r: " % (removed_in_version, )
+ msg += " error while comparing to version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+
+ if deprecated_aliases is not None:
+ for deprecated_alias in deprecated_aliases:
+ if 'name' in deprecated_alias and 'version' in deprecated_alias:
+ try:
+ collection_name = deprecated_alias.get('collection_name')
+ version = self._create_version(str(deprecated_alias['version']), collection_name=collection_name)
+ if has_version and collection_name == self.collection_name and compare_version >= version:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with removal in version %r," % (
+ deprecated_alias['name'], deprecated_alias['version'])
+ msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code=code_prefix + '-deprecated-version',
+ msg=msg,
+ )
+ except ValueError as e:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with invalid removal version %r: %s" % (
+ deprecated_alias['name'], deprecated_alias['version'], e)
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+ except TypeError:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has deprecated aliases '%s' with invalid removal version %r:" % (
+ deprecated_alias['name'], deprecated_alias['version'])
+ msg += " error while comparing to version of %s" % version_of_what
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-deprecated-version',
+ msg=msg,
+ )
+
+ aliases = data.get('aliases', [])
+ if arg in aliases:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is specified as its own alias"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-alias-self',
+ msg=msg
+ )
+ if len(aliases) > len(set(aliases)):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has at least one alias specified multiple times in aliases"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-alias-repeated',
+ msg=msg
+ )
+ if not context and arg == 'state':
+ bad_states = set(['list', 'info', 'get']) & set(data.get('choices', set()))
+ for bad_state in bad_states:
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-state-invalid-choice',
+ msg="Argument 'state' includes the value '%s' as a choice" % bad_state)
+ if not data.get('removed_in_version', None) and not data.get('removed_at_date', None):
+ args_from_argspec.add(arg)
+ args_from_argspec.update(aliases)
+ else:
+ deprecated_args_from_argspec.add(arg)
+ deprecated_args_from_argspec.update(aliases)
+ if arg == 'provider' and self.object_path.startswith('lib/ansible/modules/network/'):
+ if data.get('options') is not None and not isinstance(data.get('options'), Mapping):
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-argument-spec-options',
+ msg="Argument 'options' in argument_spec['provider'] must be a dictionary/hash when used",
+ )
+ elif data.get('options'):
+ # Record provider options from network modules, for later comparison
+ for provider_arg, provider_data in data.get('options', {}).items():
+ provider_args.add(provider_arg)
+ provider_args.update(provider_data.get('aliases', []))
+
+ if data.get('required') and data.get('default', object) != object:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is marked as required but specifies a default. Arguments with a" \
+ " default should not be marked as required"
+ self.reporter.error(
+ path=self.object_path,
+ code='no-default-for-required-parameter',
+ msg=msg
+ )
+
+ if arg in provider_args:
+ # Provider args are being removed from network module top level
+ # don't validate docs<->arg_spec checks below
+ continue
+
+ _type = data.get('type', 'str')
+ if callable(_type):
+ _type_checker = _type
+ else:
+ _type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type)
+
+ _elements = data.get('elements')
+ if (_type == 'list') and not _elements:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines type as list but elements is not defined"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-list-no-elements',
+ msg=msg
+ )
+ if _elements:
+ if not callable(_elements):
+ module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_elements)
+ if _type != 'list':
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines elements as %s but it is valid only when value of parameter type is list" % _elements
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-invalid-elements',
+ msg=msg
+ )
+
+ arg_default = None
+ if 'default' in data and not is_empty(data['default']):
+ try:
+ with CaptureStd():
+ arg_default = _type_checker(data['default'])
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines default as (%r) but this is incompatible with parameter type %r" % (data['default'], _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='incompatible-default-type',
+ msg=msg
+ )
+ continue
+
+ doc_options_args = []
+ for alias in sorted(set([arg] + list(aliases))):
+ if alias in doc_options:
+ doc_options_args.append(alias)
+ if len(doc_options_args) == 0:
+ # Undocumented arguments will be handled later (search for undocumented-parameter)
+ doc_options_arg = {}
+ else:
+ doc_options_arg = doc_options[doc_options_args[0]]
+ if len(doc_options_args) > 1:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " with aliases %s is documented multiple times, namely as %s" % (
+ ", ".join([("'%s'" % alias) for alias in aliases]),
+ ", ".join([("'%s'" % alias) for alias in doc_options_args])
+ )
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-documented-multiple-times',
+ msg=msg
+ )
+
+ try:
+ doc_default = None
+ if 'default' in doc_options_arg and not is_empty(doc_options_arg['default']):
+ with CaptureStd():
+ doc_default = _type_checker(doc_options_arg['default'])
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in documentation" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines default as (%r) but this is incompatible with parameter type %r" % (doc_options_arg.get('default'), _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-default-incompatible-type',
+ msg=msg
+ )
+ continue
+
+ if arg_default != doc_default:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines default as (%r) but documentation defines default as (%r)" % (arg_default, doc_default)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-default-does-not-match-spec',
+ msg=msg
+ )
+
+ doc_type = doc_options_arg.get('type')
+ if 'type' in data and data['type'] is not None:
+ if doc_type is None:
+ if not arg.startswith('_'): # hidden parameter, for example _raw_params
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines type as %r but documentation doesn't define type" % (data['type'])
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-type-not-in-doc',
+ msg=msg
+ )
+ elif data['type'] != doc_type:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines type as %r but documentation defines type as %r" % (data['type'], doc_type)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-type-does-not-match-spec',
+ msg=msg
+ )
+ else:
+ if doc_type is None:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " uses default type ('str') but documentation doesn't define type"
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-missing-type',
+ msg=msg
+ )
+ elif doc_type != 'str':
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " implies type as 'str' but documentation defines as %r" % doc_type
+ self.reporter.error(
+ path=self.object_path,
+ code='implied-parameter-type-mismatch',
+ msg=msg
+ )
+
+ doc_choices = []
+ try:
+ for choice in doc_options_arg.get('choices', []):
+ try:
+ with CaptureStd():
+ doc_choices.append(_type_checker(choice))
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in documentation" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-choices-incompatible-type',
+ msg=msg
+ )
+ raise StopIteration()
+ except StopIteration:
+ continue
+
+ arg_choices = []
+ try:
+ for choice in data.get('choices', []):
+ try:
+ with CaptureStd():
+ arg_choices.append(_type_checker(choice))
+ except (Exception, SystemExit):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
+ self.reporter.error(
+ path=self.object_path,
+ code='incompatible-choices',
+ msg=msg
+ )
+ raise StopIteration()
+ except StopIteration:
+ continue
+
+ if not compare_unordered_lists(arg_choices, doc_choices):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines choices as (%r) but documentation defines choices as (%r)" % (arg_choices, doc_choices)
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-choices-do-not-match-spec',
+ msg=msg
+ )
+
+ doc_required = doc_options_arg.get('required', False)
+ data_required = data.get('required', False)
+ if (doc_required or data_required) and not (doc_required and data_required):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ if doc_required:
+ msg += " is not required, but is documented as being required"
+ else:
+ msg += " is required, but is not documented as being required"
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-required-mismatch',
+ msg=msg
+ )
+
+ doc_elements = doc_options_arg.get('elements', None)
+ doc_type = doc_options_arg.get('type', 'str')
+ data_elements = data.get('elements', None)
+ if (doc_elements and not doc_type == 'list'):
+ msg = "Argument '%s " % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " defines parameter elements as %s but it is valid only when value of parameter type is list" % doc_elements
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-elements-invalid',
+ msg=msg
+ )
+ if (doc_elements or data_elements) and not (doc_elements == data_elements):
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ if data_elements:
+ msg += " specifies elements as %s," % data_elements
+ else:
+ msg += " does not specify elements,"
+ if doc_elements:
+ msg += "but elements is documented as being %s" % doc_elements
+ else:
+ msg += "but elements is not documented"
+ self.reporter.error(
+ path=self.object_path,
+ code='doc-elements-mismatch',
+ msg=msg
+ )
+
+ spec_suboptions = data.get('options')
+ doc_suboptions = doc_options_arg.get('suboptions', {})
+ if spec_suboptions:
+ if not doc_suboptions:
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " has sub-options but documentation does not define it"
+ self.reporter.error(
+ path=self.object_path,
+ code='missing-suboption-docs',
+ msg=msg
+ )
+ self._validate_argument_spec({'options': doc_suboptions}, spec_suboptions, kwargs,
+ context=context + [arg], last_context_spec=data)
+
+ for arg in args_from_argspec:
+ if not str(arg).isidentifier():
+ msg = "Argument '%s' in argument_spec" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is not a valid python identifier"
+ self.reporter.error(
+ path=self.object_path,
+ code='parameter-invalid',
+ msg=msg
+ )
+
+ if docs:
+ args_from_docs = set()
+ for arg, data in doc_options.items():
+ args_from_docs.add(arg)
+ args_from_docs.update(data.get('aliases', []))
+
+ args_missing_from_docs = args_from_argspec.difference(args_from_docs)
+ docs_missing_from_args = args_from_docs.difference(args_from_argspec | deprecated_args_from_argspec)
+ for arg in args_missing_from_docs:
+ if arg in provider_args:
+ # Provider args are being removed from network module top level
+ # So they are likely not documented on purpose
+ continue
+ msg = "Argument '%s'" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is listed in the argument_spec, but not documented in the module documentation"
+ self.reporter.error(
+ path=self.object_path,
+ code='undocumented-parameter',
+ msg=msg
+ )
+ for arg in docs_missing_from_args:
+ msg = "Argument '%s'" % arg
+ if context:
+ msg += " found in %s" % " -> ".join(context)
+ msg += " is listed in DOCUMENTATION.options, but not accepted by the module argument_spec"
+ self.reporter.error(
+ path=self.object_path,
+ code='nonexistent-parameter-documented',
+ msg=msg
+ )
+
+ def _check_for_new_args(self, doc):
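+        # Compares DOCUMENTATION.options against the copy of the module on
+        # --base-branch to catch new options and changed version_added values.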
+ if not self.base_branch or self._is_new_module():
+ return
+
+ with CaptureStd():
+ try:
+ existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(
+ self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name, is_module=True)
+ existing_options = existing_doc.get('options', {}) or {}
+ except AssertionError:
+ fragment = doc['extends_documentation_fragment']
+ self.reporter.warning(
+ path=self.object_path,
+ code='missing-existing-doc-fragment',
+ msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment
+ )
+ return
+ except Exception as e:
+ self.reporter.warning_trace(
+ path=self.object_path,
+ tracebk=e
+ )
+ self.reporter.warning(
+ path=self.object_path,
+ code='unknown-doc-fragment',
+                msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need to be updated')
+ )
+ return
+
+ try:
+ mod_collection_name = existing_doc.get('version_added_collection')
+ mod_version_added = self._create_strict_version(
+ str(existing_doc.get('version_added', '0.0')),
+ collection_name=mod_collection_name)
+ except ValueError:
+ mod_collection_name = self.collection_name
+ mod_version_added = self._create_strict_version('0.0')
+
+ options = doc.get('options', {}) or {}
+
+ should_be = '.'.join(ansible_version.split('.')[:2])
+ strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
+
+ for option, details in options.items():
+ try:
+ names = [option] + details.get('aliases', [])
+ except (TypeError, AttributeError):
+ # Reporting of this syntax error will be handled by schema validation.
+ continue
+
+ if any(name in existing_options for name in names):
+ # The option already existed. Make sure version_added didn't change.
+ for name in names:
+ existing_collection_name = existing_options.get(name, {}).get('version_added_collection')
+ existing_version = existing_options.get(name, {}).get('version_added')
+ if existing_version:
+ break
+ current_collection_name = details.get('version_added_collection')
+ current_version = details.get('version_added')
+ if current_collection_name != existing_collection_name:
+ self.reporter.error(
+ path=self.object_path,
+ code='option-incorrect-version-added-collection',
+ msg=('version_added for existing option (%s) should '
+ 'belong to collection %r. Currently belongs to %r' %
+ (option, current_collection_name, existing_collection_name))
+ )
+ elif str(current_version) != str(existing_version):
+ self.reporter.error(
+ path=self.object_path,
+ code='option-incorrect-version-added',
+ msg=('version_added for existing option (%s) should '
+ 'be %r. Currently %r' %
+ (option, existing_version, current_version))
+ )
+ continue
+
+ try:
+ collection_name = details.get('version_added_collection')
+ version_added = self._create_strict_version(
+ str(details.get('version_added', '0.0')),
+ collection_name=collection_name)
+            except ValueError:
+ # already reported during schema validation
+ continue
+
+ if collection_name != self.collection_name:
+ continue
+ if (strict_ansible_version != mod_version_added and
+ (version_added < strict_ansible_version or
+ strict_ansible_version < version_added)):
+ self.reporter.error(
+ path=self.object_path,
+ code='option-incorrect-version-added',
+ msg=('version_added for new option (%s) should '
+ 'be %r. Currently %r' %
+ (option, should_be, version_added))
+ )
+
+ return existing_doc
+
+ @staticmethod
+ def is_blacklisted(path):
+ base_name = os.path.basename(path)
+ file_name = os.path.splitext(base_name)[0]
+
+ if file_name.startswith('_') and os.path.islink(path):
+ return True
+
+ if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.BLACKLIST):
+ return True
+
+ for pat in ModuleValidator.BLACKLIST_PATTERNS:
+ if fnmatch(base_name, pat):
+ return True
+
+ return False
+
+ def validate(self):
+ super(ModuleValidator, self).validate()
+ if not self._python_module() and not self._powershell_module():
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-extension',
+ msg=('Official Ansible modules must have a .py '
+ 'extension for python modules or a .ps1 '
+ 'for powershell modules')
+ )
+ self._python_module_override = True
+
+ if self._python_module() and self.ast is None:
+ self.reporter.error(
+ path=self.object_path,
+ code='python-syntax-error',
+ msg='Python SyntaxError while parsing module'
+ )
+ try:
+ compile(self.text, self.path, 'exec')
+ except Exception:
+ self.reporter.trace(
+ path=self.object_path,
+ tracebk=traceback.format_exc()
+ )
+ return
+
+ end_of_deprecation_should_be_removed_only = False
+ if self._python_module():
+ doc_info, docs = self._validate_docs()
+
+            # See if current version >= deprecated.removed_in, i.e. the module should be docs-only
+ if docs and docs.get('deprecated', False):
+
+ if 'removed_in' in docs['deprecated']:
+ removed_in = None
+ collection_name = docs['deprecated'].get('removed_from_collection')
+ version = docs['deprecated']['removed_in']
+ if collection_name != self.collection_name:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-module-deprecation-source',
+ msg=('The deprecation version for a module must be added in this collection')
+ )
+ else:
+ try:
+ removed_in = self._create_strict_version(str(version), collection_name=collection_name)
+ except ValueError as e:
+ self.reporter.error(
+ path=self.object_path,
+ code='invalid-module-deprecation-version',
+ msg=('The deprecation version %r cannot be parsed: %s' % (version, e))
+ )
+
+ if removed_in:
+ if not self.collection:
+ strict_ansible_version = self._create_strict_version(
+ '.'.join(ansible_version.split('.')[:2]), self.collection_name)
+ end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
+ elif self.collection_version:
+ strict_ansible_version = self.collection_version
+ end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
+
+ # handle deprecation by date
+ if 'removed_at_date' in docs['deprecated']:
+ try:
+ removed_at_date = docs['deprecated']['removed_at_date']
+ if parse_isodate(removed_at_date, allow_date=True) < datetime.date.today():
+ msg = "Module's deprecated.removed_at_date date '%s' is before today" % removed_at_date
+ self.reporter.error(path=self.object_path, code='deprecated-date', msg=msg)
+ except ValueError:
+ # This happens if the date cannot be parsed. This is already checked by the schema.
+ pass
+
+ if self._python_module() and not self._just_docs() and not end_of_deprecation_should_be_removed_only:
+ self._validate_ansible_module_call(docs)
+ self._check_for_sys_exit()
+ self._find_blacklist_imports()
+ main = self._find_main_call()
+ self._find_module_utils(main)
+ self._find_has_import()
+ first_callable = self._get_first_callable()
+ self._ensure_imports_below_docs(doc_info, first_callable)
+ self._check_for_subprocess()
+ self._check_for_os_call()
+
+ if self._powershell_module():
+ if self.basename in self.PS_DOC_BLACKLIST:
+ return
+
+ self._validate_ps_replacers()
+ docs_path = self._find_ps_docs_py_file()
+
+ # We can only validate PowerShell arg spec if it is using the new Ansible.Basic.AnsibleModule util
+ pattern = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*Ansible\.Basic'
+ if re.search(pattern, self.text) and self.object_name not in self.PS_ARG_VALIDATE_BLACKLIST:
+ with ModuleValidator(docs_path, base_branch=self.base_branch, git_cache=self.git_cache) as docs_mv:
+ docs = docs_mv._validate_docs()[1]
+ self._validate_ansible_module_call(docs)
+
+ self._check_gpl3_header()
+ if not self._just_docs() and not end_of_deprecation_should_be_removed_only:
+ self._check_interpreter(powershell=self._powershell_module())
+ self._check_type_instead_of_isinstance(
+ powershell=self._powershell_module()
+ )
+ if end_of_deprecation_should_be_removed_only:
+            # Ensure that `if __name__ == '__main__':` calls `removed_module()`, which ensures that the module has no code in it
+ main = self._find_main_call('removed_module')
+ # FIXME: Ensure that the version in the call to removed_module is less than +2.
+ # Otherwise it's time to remove the file (This may need to be done in another test to
+ # avoid breaking whenever the Ansible version bumps)
+
+
+class PythonPackageValidator(Validator):
+ BLACKLIST_FILES = frozenset(('__pycache__',))
+
+ def __init__(self, path, reporter=None):
+ super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter())
+
+ self.path = path
+ self.basename = os.path.basename(path)
+
+ @property
+ def object_name(self):
+ return self.basename
+
+ @property
+ def object_path(self):
+ return self.path
+
+ def validate(self):
+ super(PythonPackageValidator, self).validate()
+
+ if self.basename in self.BLACKLIST_FILES:
+ return
+
+ init_file = os.path.join(self.path, '__init__.py')
+ if not os.path.exists(init_file):
+ self.reporter.error(
+ path=self.object_path,
+ code='subdirectory-missing-init',
+ msg='Ansible module subdirectories must contain an __init__.py'
+ )
+
+
+def setup_collection_loader():
+ collections_paths = os.environ.get('ANSIBLE_COLLECTIONS_PATH', '').split(os.pathsep)
+ _AnsibleCollectionFinder(collections_paths)
+
+
+def re_compile(value):
+ """
+    Argparse expects things to raise TypeError, but re.compile raises a
+    re.error exception.
+
+    This function is a shorthand to convert the re.error exception into a
+    TypeError.
+ """
+
+ try:
+ return re.compile(value)
+ except re.error as e:
+ raise TypeError(e)
+
+
+def run():
+ parser = argparse.ArgumentParser(prog="validate-modules")
+ parser.add_argument('modules', nargs='+',
+ help='Path to module or module directory')
+ parser.add_argument('-w', '--warnings', help='Show warnings',
+ action='store_true')
+ parser.add_argument('--exclude', help='RegEx exclusion pattern',
+ type=re_compile)
+ parser.add_argument('--arg-spec', help='Analyze module argument spec',
+ action='store_true', default=False)
+ parser.add_argument('--base-branch', default=None,
+ help='Used in determining if new options were added')
+ parser.add_argument('--format', choices=['json', 'plain'], default='plain',
+ help='Output format. Default: "%(default)s"')
+ parser.add_argument('--output', default='-',
+ help='Output location, use "-" for stdout. '
+ 'Default "%(default)s"')
+ parser.add_argument('--collection',
+ help='Specifies the path to the collection, when '
+ 'validating files within a collection. Ensure '
+ 'that ANSIBLE_COLLECTIONS_PATH is set so the '
+ 'contents of the collection can be located')
+ parser.add_argument('--collection-version',
+ help='The collection\'s version number used to check '
+ 'deprecations')
+
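+    # Example invocations (illustrative; paths are hypothetical):
+    #     validate-modules lib/ansible/modules/ping.py --arg-spec
+    #     validate-modules plugins/modules/ --collection /path/to/collection --format json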
+ args = parser.parse_args()
+
+ args.modules = [m.rstrip('/') for m in args.modules]
+
+ reporter = Reporter()
+ git_cache = GitCache(args.base_branch)
+
+ check_dirs = set()
+
+ routing = None
+ if args.collection:
+ setup_collection_loader()
+ routing_file = 'meta/runtime.yml'
+ # Load meta/runtime.yml if it exists, as it may contain deprecation information
+ if os.path.isfile(routing_file):
+ try:
+ with open(routing_file) as f:
+ routing = yaml.safe_load(f)
+ except yaml.error.MarkedYAMLError as ex:
+ print('%s:%d:%d: YAML load failed: %s' % (routing_file, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
+ except Exception as ex: # pylint: disable=broad-except
+ print('%s:%d:%d: YAML load failed: %s' % (routing_file, 0, 0, re.sub(r'\s+', ' ', str(ex))))
+
+ for module in args.modules:
+ if os.path.isfile(module):
+ path = module
+ if args.exclude and args.exclude.search(path):
+ continue
+ if ModuleValidator.is_blacklisted(path):
+ continue
+ with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
+ analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
+ git_cache=git_cache, reporter=reporter, routing=routing) as mv1:
+ mv1.validate()
+ check_dirs.add(os.path.dirname(path))
+
+ for root, dirs, files in os.walk(module):
+ basedir = root[len(module) + 1:].split('/', 1)[0]
+ if basedir in BLACKLIST_DIRS:
+ continue
+ for dirname in dirs:
+ if root == module and dirname in BLACKLIST_DIRS:
+ continue
+ path = os.path.join(root, dirname)
+ if args.exclude and args.exclude.search(path):
+ continue
+ check_dirs.add(path)
+
+ for filename in files:
+ path = os.path.join(root, filename)
+ if args.exclude and args.exclude.search(path):
+ continue
+ if ModuleValidator.is_blacklisted(path):
+ continue
+ with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
+ analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
+ git_cache=git_cache, reporter=reporter, routing=routing) as mv2:
+ mv2.validate()
+
+ if not args.collection:
+ for path in sorted(check_dirs):
+ pv = PythonPackageValidator(path, reporter=reporter)
+ pv.validate()
+
+ if args.format == 'plain':
+ sys.exit(reporter.plain(warnings=args.warnings, output=args.output))
+ else:
+ sys.exit(reporter.json(warnings=args.warnings, output=args.output))
+
+
+class GitCache:
+ def __init__(self, base_branch):
+ self.base_branch = base_branch
+
+ if self.base_branch:
+ self.base_tree = self._git(['ls-tree', '-r', '--name-only', self.base_branch, 'lib/ansible/modules/'])
+ else:
+ self.base_tree = []
+
+ try:
+ self.head_tree = self._git(['ls-tree', '-r', '--name-only', 'HEAD', 'lib/ansible/modules/'])
+ except GitError as ex:
+ if ex.status == 128:
+ # fallback when there is no .git directory
+ self.head_tree = self._get_module_files()
+ else:
+ raise
+ except OSError as ex:
+ if ex.errno == errno.ENOENT:
+ # fallback when git is not installed
+ self.head_tree = self._get_module_files()
+ else:
+ raise
+
+ self.base_module_paths = dict((os.path.basename(p), p) for p in self.base_tree if os.path.splitext(p)[1] in ('.py', '.ps1'))
+
+ self.base_module_paths.pop('__init__.py', None)
+
+ self.head_aliased_modules = set()
+
+ for path in self.head_tree:
+ filename = os.path.basename(path)
+
+ if filename.startswith('_') and filename != '__init__.py':
+ if os.path.islink(path):
+ self.head_aliased_modules.add(os.path.basename(os.path.realpath(path)))
+
+ @staticmethod
+ def _get_module_files():
+ module_files = []
+
+ for (dir_path, dir_names, file_names) in os.walk('lib/ansible/modules/'):
+ for file_name in file_names:
+ module_files.append(os.path.join(dir_path, file_name))
+
+ return module_files
+
+ @staticmethod
+ def _git(args):
+ cmd = ['git'] + args
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ if p.returncode != 0:
+ raise GitError(stderr, p.returncode)
+ return stdout.decode('utf-8').splitlines()
+
+
+class GitError(Exception):
+ def __init__(self, message, status):
+ super(GitError, self).__init__(message)
+
+ self.status = status
+
+
+def main():
+ try:
+ run()
+ except KeyboardInterrupt:
+ pass
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py
new file mode 100644
index 00000000..ac025291
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/module_args.py
@@ -0,0 +1,170 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2016 Matt Martz <matt@sivel.net>
+# Copyright (C) 2016 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import runpy
+import json
+import os
+import subprocess
+import sys
+
+from contextlib import contextmanager
+
+from ansible.executor.powershell.module_manifest import PSModuleDepFinder
+from ansible.module_utils.basic import FILE_COMMON_ARGUMENTS
+from ansible.module_utils.six import reraise
+from ansible.module_utils._text import to_bytes, to_text
+
+from .utils import CaptureStd, find_executable, get_module_name_from_filename
+
+
+class AnsibleModuleCallError(RuntimeError):
+ pass
+
+
+class AnsibleModuleImportError(ImportError):
+ pass
+
+
+class AnsibleModuleNotInitialized(Exception):
+ pass
+
+
+class _FakeAnsibleModuleInit:
+ def __init__(self):
+ self.args = tuple()
+ self.kwargs = {}
+ self.called = False
+
+ def __call__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+ self.called = True
+ raise AnsibleModuleCallError('AnsibleModuleCallError')
+
+
+def _fake_load_params():
+ pass
+
+
+@contextmanager
+def setup_env(filename):
+ # Used to clean up imports later
+ pre_sys_modules = list(sys.modules.keys())
+
+ fake = _FakeAnsibleModuleInit()
+ module = __import__('ansible.module_utils.basic').module_utils.basic
+ _original_init = module.AnsibleModule.__init__
+ _original_load_params = module._load_params
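+ # Swap in the fake __init__ and a no-op _load_params so that running the
+ # module records the AnsibleModule arguments and then aborts via
+ # AnsibleModuleCallError instead of executing the module body.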
+ setattr(module.AnsibleModule, '__init__', fake)
+ setattr(module, '_load_params', _fake_load_params)
+
+ try:
+ yield fake
+ finally:
+ setattr(module.AnsibleModule, '__init__', _original_init)
+ setattr(module, '_load_params', _original_load_params)
+
+ # Clean up imports to prevent issues with mutable data being used in modules
+ for k in list(sys.modules.keys()):
+ # It's faster if we limit to items in ansible.module_utils
+ # But if this causes problems later, we should remove it
+ if k not in pre_sys_modules and k.startswith('ansible.module_utils.'):
+ del sys.modules[k]
+
+
+def get_ps_argument_spec(filename, collection):
+ fqc_name = get_module_name_from_filename(filename, collection)
+
+ pwsh = find_executable('pwsh')
+ if not pwsh:
+ raise FileNotFoundError('Required program for PowerShell arg spec inspection "pwsh" not found.')
+
+ module_path = os.path.join(os.getcwd(), filename)
+ b_module_path = to_bytes(module_path, errors='surrogate_or_strict')
+ with open(b_module_path, mode='rb') as module_fd:
+ b_module_data = module_fd.read()
+
+ ps_dep_finder = PSModuleDepFinder()
+ ps_dep_finder.scan_module(b_module_data, fqn=fqc_name)
+
+ # For ps_argspec.ps1 to compile Ansible.Basic it also needs the AddType module_util.
+ ps_dep_finder._add_module((b"Ansible.ModuleUtils.AddType", ".psm1", None), wrapper=False)
+
+ util_manifest = json.dumps({
+ 'module_path': to_text(module_path, errors='surrogate_or_strict'),
+ 'ansible_basic': ps_dep_finder.cs_utils_module["Ansible.Basic"]['path'],
+ 'ps_utils': dict([(name, info['path']) for name, info in ps_dep_finder.ps_modules.items()]),
+ })
+
+ script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ps_argspec.ps1')
+ proc = subprocess.Popen([script_path, util_manifest], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ shell=False)
+ stdout, stderr = proc.communicate()
+
+ if proc.returncode != 0:
+ raise AnsibleModuleImportError("STDOUT:\n%s\nSTDERR:\n%s" % (stdout.decode('utf-8'), stderr.decode('utf-8')))
+
+ kwargs = json.loads(stdout)
+
+ # the validate-modules code expects the options spec under the 'argument_spec' key, not the 'options' key set by PowerShell
+ kwargs['argument_spec'] = kwargs.pop('options', {})
+
+ return kwargs['argument_spec'], (), kwargs
+
+
+def get_py_argument_spec(filename, collection):
+ name = get_module_name_from_filename(filename, collection)
+
+ with setup_env(filename) as fake:
+ try:
+ with CaptureStd():
+ runpy.run_module(name, run_name='__main__', alter_sys=True)
+ except AnsibleModuleCallError:
+ pass
+ except BaseException as e:
+ # we want to catch all exceptions here, including sys.exit
+ reraise(AnsibleModuleImportError, AnsibleModuleImportError('%s' % e), sys.exc_info()[2])
+
+ if not fake.called:
+ raise AnsibleModuleNotInitialized()
+
+ try:
+ # for ping kwargs == {'argument_spec':{'data':{'type':'str','default':'pong'}}, 'supports_check_mode':True}
+ if 'argument_spec' in fake.kwargs:
+ argument_spec = fake.kwargs['argument_spec']
+ else:
+ argument_spec = fake.args[0]
+ # If add_file_common_args is truthy, add options from FILE_COMMON_ARGUMENTS when not present.
+ # This is the only modification to argument_spec done by AnsibleModule itself, and it is
+ # not caught by setup_env's AnsibleModule replacement
+ if fake.kwargs.get('add_file_common_args'):
+ for k, v in FILE_COMMON_ARGUMENTS.items():
+ if k not in argument_spec:
+ argument_spec[k] = v
+ return argument_spec, fake.args, fake.kwargs
+ except (TypeError, IndexError):
+ return {}, (), {}
+
+
+def get_argument_spec(filename, collection):
+ if filename.endswith('.py'):
+ return get_py_argument_spec(filename, collection)
+ else:
+ return get_ps_argument_spec(filename, collection)
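+
+
+# Illustrative usage: for the core ping module referenced in the comment above,
+#   spec, args, kwargs = get_argument_spec('lib/ansible/modules/system/ping.py', None)
+# would yield spec == {'data': {'type': 'str', 'default': 'pong'}}.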
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1 b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1
new file mode 100755
index 00000000..5ceb9d50
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/ps_argspec.ps1
@@ -0,0 +1,110 @@
+#!/usr/bin/env pwsh
+#Requires -Version 6
+
+Set-StrictMode -Version 2.0
+$ErrorActionPreference = "Stop"
+$WarningPreference = "Stop"
+
+Function Resolve-CircularReference {
+ <#
+ .SYNOPSIS
+ Removes known types that cause a circular reference in their json serialization.
+
+ .PARAMETER Hash
+ The hash to scan for circular references
+ #>
+ [CmdletBinding()]
+ param (
+ [Parameter(Mandatory=$true)]
+ [System.Collections.IDictionary]
+ $Hash
+ )
+
+ foreach ($key in [String[]]$Hash.Keys) {
+ $value = $Hash[$key]
+ if ($value -is [System.Collections.IDictionary]) {
+ Resolve-CircularReference -Hash $value
+ } elseif ($value -is [Array] -or $value -is [System.Collections.IList]) {
+ $values = @(foreach ($v in $value) {
+ if ($v -is [System.Collections.IDictionary]) {
+ Resolve-CircularReference -Hash $v
+ }
+ ,$v
+ })
+ $Hash[$key] = $values
+ } elseif ($value -is [DateTime]) {
+ $Hash[$key] = $value.ToString("yyyy-MM-dd")
+ } elseif ($value -is [delegate]) {
+ # Type can be set to a delegate function which defines its own type. For the documentation we just
+ # reflect that as raw
+ if ($key -eq 'type') {
+ $Hash[$key] = 'raw'
+ } else {
+ $Hash[$key] = $value.ToString() # Shouldn't ever happen but just in case.
+ }
+ }
+ }
+}
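+
+# Illustrative effect (hypothetical values): a spec entry such as
+#   @{ type = [Func[object, object]]{ $args[0] }; removed_at_date = (Get-Date) }
+# comes back with type = 'raw' and the date collapsed to a 'yyyy-MM-dd' string,
+# which ConvertTo-Json can then serialize without recursing forever.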
+
+$manifest = ConvertFrom-Json -InputObject $args[0] -AsHashtable
+if (-not $manifest.Contains('module_path') -or -not $manifest.module_path) {
+ Write-Error -Message "No module specified."
+ exit 1
+}
+$module_path = $manifest.module_path
+
+# Check if the path is relative and get the full path to the module
+if (-not ([System.IO.Path]::IsPathRooted($module_path))) {
+ $module_path = $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($module_path)
+}
+
+if (-not (Test-Path -LiteralPath $module_path -PathType Leaf)) {
+ Write-Error -Message "The module at '$module_path' does not exist."
+ exit 1
+}
+
+$module_code = Get-Content -LiteralPath $module_path -Raw
+
+$powershell = [PowerShell]::Create()
+$powershell.Runspace.SessionStateProxy.SetVariable("ErrorActionPreference", "Stop")
+
+# Load the PowerShell module utils as the module may be using them to refer to shared module options. Currently we
+# can only load the PowerShell utils due to cross platform compatibility issues.
+if ($manifest.Contains('ps_utils')) {
+ foreach ($util_info in $manifest.ps_utils.GetEnumerator()) {
+ $util_name = $util_info.Key
+ $util_path = $util_info.Value
+
+ if (-not (Test-Path -LiteralPath $util_path -PathType Leaf)) {
+ # Failed to find the util path, just silently ignore for now and hope for the best.
+ continue
+ }
+
+ $util_sb = [ScriptBlock]::Create((Get-Content -LiteralPath $util_path -Raw))
+ $powershell.AddCommand('New-Module').AddParameters(@{
+ Name = $util_name
+ ScriptBlock = $util_sb
+ }) > $null
+ $powershell.AddCommand('Import-Module').AddParameter('WarningAction', 'SilentlyContinue') > $null
+ $powershell.AddCommand('Out-Null').AddStatement() > $null
+
+ # Also import it into the current runspace in case ps_argspec.ps1 needs to use it.
+ $null = New-Module -Name $util_name -ScriptBlock $util_sb | Import-Module -WarningAction SilentlyContinue
+ }
+}
+
+Add-CSharpType -References @(Get-Content -LiteralPath $manifest.ansible_basic -Raw)
+[Ansible.Basic.AnsibleModule]::_DebugArgSpec = $true
+
+$powershell.AddScript($module_code) > $null
+$powershell.Invoke() > $null
+
+if ($powershell.HadErrors) {
+ $powershell.Streams.Error
+ exit 1
+}
+
+$arg_spec = $powershell.Runspace.SessionStateProxy.GetVariable('ansibleTestArgSpec')
+Resolve-CircularReference -Hash $arg_spec
+
+ConvertTo-Json -InputObject $arg_spec -Compress -Depth 99
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py
new file mode 100644
index 00000000..42a2ada4
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/schema.py
@@ -0,0 +1,488 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2015, Matt Martz <matt@sivel.net>
+# Copyright: (c) 2015, Rackspace US, Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from distutils.version import StrictVersion
+from functools import partial
+
+from voluptuous import ALLOW_EXTRA, PREVENT_EXTRA, All, Any, Invalid, Length, Required, Schema, Self, ValueInvalid
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common.collections import is_iterable
+from ansible.utils.version import SemanticVersion
+
+from .utils import parse_isodate
+
+list_string_types = list(string_types)
+tuple_string_types = tuple(string_types)
+any_string_types = Any(*string_types)
+
+# Valid DOCUMENTATION.author lines
+# Based on Ansibulbot's extract_github_id()
+# author: First Last (@name) [optional anything]
+# "Ansible Core Team" - Used by the Bot
+# "Michael DeHaan" - nop
+# "OpenStack Ansible SIG" - OpenStack does not use GitHub
+# "Name (!UNKNOWN)" - For the few untraceable authors
+author_line = re.compile(r'^\w.*(\(@([\w-]+)\)|!UNKNOWN)(?![\w.])|^Ansible Core Team$|^Michael DeHaan$|^OpenStack Ansible SIG$')
+
+
+def _add_ansible_error_code(exception, error_code):
+ setattr(exception, 'ansible_error_code', error_code)
+ return exception
+
+
+def isodate(v, error_code=None):
+ try:
+ parse_isodate(v, allow_date=True)
+ except ValueError as e:
+ raise _add_ansible_error_code(Invalid(str(e)), error_code or 'ansible-invalid-date')
+ return v
+
+
+COLLECTION_NAME_RE = re.compile(r'^([^.]+\.[^.]+)$')
+
+
+def collection_name(v, error_code=None):
+ if not isinstance(v, string_types):
+ raise _add_ansible_error_code(
+ Invalid('Collection name must be a string'), error_code or 'collection-invalid-name')
+ m = COLLECTION_NAME_RE.match(v)
+ if not m:
+ raise _add_ansible_error_code(
+ Invalid('Collection name must be of format `<namespace>.<name>`'), error_code or 'collection-invalid-name')
+ return v
+
+
+def version(for_collection=False):
+ if for_collection:
+ # We do not accept floats for versions in collections
+ return Any(*string_types)
+ return Any(float, *string_types)
+
+
+def date(error_code=None):
+ return Any(isodate, error_code=error_code)
+
+
+def is_callable(v):
+ if not callable(v):
+ raise ValueInvalid('not a valid value')
+ return v
+
+
+def sequence_of_sequences(min=None, max=None):
+ return All(
+ Any(
+ None,
+ [Any(list, tuple)],
+ tuple([Any(list, tuple)]),
+ ),
+ Any(
+ None,
+ [Length(min=min, max=max)],
+ tuple([Length(min=min, max=max)]),
+ ),
+ )
+
+
+seealso_schema = Schema(
+ [
+ Any(
+ {
+ Required('module'): Any(*string_types),
+ 'description': Any(*string_types),
+ },
+ {
+ Required('ref'): Any(*string_types),
+ Required('description'): Any(*string_types),
+ },
+ {
+ Required('name'): Any(*string_types),
+ Required('link'): Any(*string_types),
+ Required('description'): Any(*string_types),
+ },
+ ),
+ ]
+)
+
+
+argument_spec_types = ['bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw',
+ 'sid', 'str']
+
+
+argument_spec_modifiers = {
+ 'mutually_exclusive': sequence_of_sequences(min=2),
+ 'required_together': sequence_of_sequences(min=2),
+ 'required_one_of': sequence_of_sequences(min=2),
+ 'required_if': sequence_of_sequences(min=3, max=4),
+ 'required_by': Schema({str: Any(list_string_types, tuple_string_types, *string_types)}),
+}
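+
+# For example (illustrative): mutually_exclusive=[['path', 'content']] satisfies
+# sequence_of_sequences(min=2) above, while [['path']] fails its Length(min=2) check.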
+
+
+def no_required_with_default(v):
+ if v.get('default') and v.get('required'):
+ raise Invalid('required=True cannot be supplied with a default')
+ return v
+
+
+def elements_with_list(v):
+ if v.get('elements') and v.get('type') != 'list':
+ raise Invalid('type must be list to use elements')
+ return v
+
+
+def options_with_apply_defaults(v):
+ if v.get('apply_defaults') and not v.get('options'):
+ raise Invalid('apply_defaults=True requires options to be set')
+ return v
+
+
+def option_deprecation(v):
+ if v.get('removed_in_version') or v.get('removed_at_date'):
+ if v.get('removed_in_version') and v.get('removed_at_date'):
+ raise _add_ansible_error_code(
+ Invalid('Only one of removed_in_version and removed_at_date must be specified'),
+ error_code='deprecation-either-date-or-version')
+ if not v.get('removed_from_collection'):
+ raise _add_ansible_error_code(
+ Invalid('If removed_in_version or removed_at_date is specified, '
+ 'removed_from_collection must be specified as well'),
+ error_code='deprecation-collection-missing')
+ return
+ if v.get('removed_from_collection'):
+ raise Invalid('removed_from_collection cannot be specified without either '
+ 'removed_in_version or removed_at_date')
+
+
+def argument_spec_schema(for_collection):
+ any_string_types = Any(*string_types)
+ schema = {
+ any_string_types: {
+ 'type': Any(is_callable, *argument_spec_types),
+ 'elements': Any(*argument_spec_types),
+ 'default': object,
+ 'fallback': Any(
+ (is_callable, list_string_types),
+ [is_callable, list_string_types],
+ ),
+ 'choices': Any([object], (object,)),
+ 'required': bool,
+ 'no_log': bool,
+ 'aliases': Any(list_string_types, tuple(list_string_types)),
+ 'apply_defaults': bool,
+ 'removed_in_version': version(for_collection),
+ 'removed_at_date': date(),
+ 'removed_from_collection': collection_name,
+ 'options': Self,
+ 'deprecated_aliases': Any([Any(
+ {
+ Required('name'): Any(*string_types),
+ Required('date'): date(),
+ Required('collection_name'): collection_name,
+ },
+ {
+ Required('name'): Any(*string_types),
+ Required('version'): version(for_collection),
+ Required('collection_name'): collection_name,
+ },
+ )]),
+ }
+ }
+ schema[any_string_types].update(argument_spec_modifiers)
+ schemas = All(
+ schema,
+ Schema({any_string_types: no_required_with_default}),
+ Schema({any_string_types: elements_with_list}),
+ Schema({any_string_types: options_with_apply_defaults}),
+ Schema({any_string_types: option_deprecation}),
+ )
+ return Schema(schemas)
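+
+
+# Illustrative check: argument_spec_schema(False)({'data': {'type': 'str', 'default': 'pong'}})
+# validates and returns the spec, while a spec using an unknown type such as 'text' raises
+# Invalid, since 'text' is neither callable nor listed in argument_spec_types.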
+
+
+def ansible_module_kwargs_schema(for_collection):
+ schema = {
+ 'argument_spec': argument_spec_schema(for_collection),
+ 'bypass_checks': bool,
+ 'no_log': bool,
+ 'check_invalid_arguments': Any(None, bool),
+ 'add_file_common_args': bool,
+ 'supports_check_mode': bool,
+ }
+ schema.update(argument_spec_modifiers)
+ return Schema(schema)
+
+
+json_value = Schema(Any(
+ None,
+ int,
+ float,
+ [Self],
+ *(list({str_type: Self} for str_type in string_types) + list(string_types))
+))
+
+
+def version_added(v, error_code='version-added-invalid', accept_historical=False):
+ if 'version_added' in v:
+ version_added = v.get('version_added')
+ if isinstance(version_added, string_types):
+ # If it is not a string, schema validation will have already complained
+ # - or we have a float and we are in ansible/ansible, in which case we're
+ # also happy.
+ if v.get('version_added_collection') == 'ansible.builtin':
+ if version_added == 'historical' and accept_historical:
+ return v
+ try:
+ version = StrictVersion()
+ version.parse(version_added)
+ except ValueError as exc:
+ raise _add_ansible_error_code(
+ Invalid('version_added (%r) is not a valid ansible-base version: '
+ '%s' % (version_added, exc)),
+ error_code=error_code)
+ else:
+ try:
+ version = SemanticVersion()
+ version.parse(version_added)
+ except ValueError as exc:
+ raise _add_ansible_error_code(
+ Invalid('version_added (%r) is not a valid collection version '
+ '(see specification at https://semver.org/): '
+ '%s' % (version_added, exc)),
+ error_code=error_code)
+ elif 'version_added_collection' in v:
+ # Must have been manual intervention, since version_added_collection is only
+ # added automatically when version_added is present
+ raise Invalid('version_added_collection cannot be specified without version_added')
+ return v
+
+
+def list_dict_option_schema(for_collection):
+ suboption_schema = Schema(
+ {
+ Required('description'): Any(list_string_types, *string_types),
+ 'required': bool,
+ 'choices': list,
+ 'aliases': Any(list_string_types),
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ 'default': json_value,
+ # Note: type values are strings like 'bool', not Python literals such as True or False
+ 'type': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+ # in case of type='list' elements define type of individual item in list
+ 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+ # Recursive suboptions
+ 'suboptions': Any(None, *list({str_type: Self} for str_type in string_types)),
+ },
+ extra=PREVENT_EXTRA
+ )
+
+ # This generates a list of dicts with keys from string_types and suboption_schema values
+ # for example in Python 3: [{str: suboption_schema}]
+ list_dict_suboption_schema = [{str_type: suboption_schema} for str_type in string_types]
+
+ option_schema = Schema(
+ {
+ Required('description'): Any(list_string_types, *string_types),
+ 'required': bool,
+ 'choices': list,
+ 'aliases': Any(list_string_types),
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ 'default': json_value,
+ 'suboptions': Any(None, *list_dict_suboption_schema),
+ # Note: type values are strings like 'bool', not Python literals such as True or False
+ 'type': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+ # in case of type='list' elements define type of individual item in list
+ 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+ },
+ extra=PREVENT_EXTRA
+ )
+
+ option_version_added = Schema(
+ All({
+ 'suboptions': Any(None, *[{str_type: Self} for str_type in string_types]),
+ }, partial(version_added, error_code='option-invalid-version-added')),
+ extra=ALLOW_EXTRA
+ )
+
+ # This generates a list of dicts with keys from string_types and option_schema values
+ # for example in Python 3: [{str: option_schema}]
+ return [{str_type: All(option_schema, option_version_added)} for str_type in string_types]
+
+
+def return_contains(v):
+ schema = Schema(
+ {
+ Required('contains'): Any(dict, list, *string_types)
+ },
+ extra=ALLOW_EXTRA
+ )
+ if v.get('type') == 'complex':
+ return schema(v)
+ return v
+
+
+def return_schema(for_collection):
+ return_contains_schema = Any(
+ All(
+ Schema(
+ {
+ Required('description'): Any(list_string_types, *string_types),
+ 'returned': Any(*string_types), # only returned on top level
+ Required('type'): Any('bool', 'complex', 'dict', 'float', 'int', 'list', 'str'),
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ 'sample': json_value,
+ 'example': json_value,
+ 'contains': Any(None, *list({str_type: Self} for str_type in string_types)),
+ # in case of type='list' elements define type of individual item in list
+ 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+ }
+ ),
+ Schema(return_contains),
+ Schema(partial(version_added, error_code='option-invalid-version-added')),
+ ),
+ Schema(type(None)),
+ )
+
+ # This generates a list of dicts with keys from string_types and return_contains_schema values
+ # for example in Python 3: [{str: return_contains_schema}]
+ list_dict_return_contains_schema = [{str_type: return_contains_schema} for str_type in string_types]
+
+ return Any(
+ All(
+ Schema(
+ {
+ any_string_types: {
+ Required('description'): Any(list_string_types, *string_types),
+ Required('returned'): Any(*string_types),
+ Required('type'): Any('bool', 'complex', 'dict', 'float', 'int', 'list', 'str'),
+ 'version_added': version(for_collection),
+ 'version_added_collection': collection_name,
+ 'sample': json_value,
+ 'example': json_value,
+ 'contains': Any(None, *list_dict_return_contains_schema),
+ # in case of type='list' elements define type of individual item in list
+ 'elements': Any(None, 'bits', 'bool', 'bytes', 'dict', 'float', 'int', 'json', 'jsonarg', 'list', 'path', 'raw', 'sid', 'str'),
+ }
+ }
+ ),
+ Schema({any_string_types: return_contains}),
+ Schema({any_string_types: partial(version_added, error_code='option-invalid-version-added')}),
+ ),
+ Schema(type(None)),
+ )
+
+
+def deprecation_schema(for_collection):
+ main_fields = {
+ Required('why'): Any(*string_types),
+ Required('alternative'): Any(*string_types),
+ Required('removed_from_collection'): collection_name,
+ 'removed': Any(True),
+ }
+
+ date_schema = {
+ Required('removed_at_date'): date(),
+ }
+ date_schema.update(main_fields)
+
+ if for_collection:
+ version_schema = {
+ Required('removed_in'): version(for_collection),
+ }
+ else:
+ version_schema = {
+ # Only list branches that are deprecated or may have docs stubs in them
+ # Deprecation cycle changed at 2.4 (though not retroactively)
+ # 2.3 -> removed_in: "2.5" + n for docs stub
+ # 2.4 -> removed_in: "2.8" + n for docs stub
+ Required('removed_in'): Any(
+ "2.2", "2.3", "2.4", "2.5", "2.6", "2.8", "2.9", "2.10", "2.11", "2.12", "2.13", "2.14"),
+ }
+ version_schema.update(main_fields)
+
+ return Any(
+ Schema(version_schema, extra=PREVENT_EXTRA),
+ Schema(date_schema, extra=PREVENT_EXTRA),
+ )
+
+
+def author(value):
+ if value is None:
+ return value # let schema checks handle
+
+ if not is_iterable(value):
+ value = [value]
+
+ for line in value:
+ if not isinstance(line, string_types):
+ continue # let schema checks handle
+ m = author_line.search(line)
+ if not m:
+ raise Invalid("Invalid author")
+
+ return value
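+
+
+# Illustrative: author('First Last (@github-id)') and author(['Ansible Core Team',
+# 'Name (!UNKNOWN)']) validate, while author('First Last') raises Invalid because
+# no GitHub id or known exception matches the author_line pattern.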
+
+
+def doc_schema(module_name, for_collection=False, deprecated_module=False):
+
+ if module_name.startswith('_'):
+ module_name = module_name[1:]
+ deprecated_module = True
+ doc_schema_dict = {
+ Required('module'): module_name,
+ Required('short_description'): Any(*string_types),
+ Required('description'): Any(list_string_types, *string_types),
+ Required('author'): All(Any(None, list_string_types, *string_types), author),
+ 'notes': Any(None, list_string_types),
+ 'seealso': Any(None, seealso_schema),
+ 'requirements': list_string_types,
+ 'todo': Any(None, list_string_types, *string_types),
+ 'options': Any(None, *list_dict_option_schema(for_collection)),
+ 'extends_documentation_fragment': Any(list_string_types, *string_types),
+ 'version_added_collection': collection_name,
+ }
+
+ if for_collection:
+ # Optional
+ doc_schema_dict['version_added'] = version(for_collection=True)
+ else:
+ doc_schema_dict[Required('version_added')] = version(for_collection=False)
+
+ if deprecated_module:
+ deprecation_required_scheme = {
+ Required('deprecated'): Any(deprecation_schema(for_collection=for_collection)),
+ }
+
+ doc_schema_dict.update(deprecation_required_scheme)
+ return Schema(
+ All(
+ Schema(
+ doc_schema_dict,
+ extra=PREVENT_EXTRA
+ ),
+ partial(version_added, error_code='module-invalid-version-added', accept_historical=not for_collection),
+ )
+ )
+
+
+# Things to add soon
+####################
+# 1) Recursively validate `type: complex` fields
+# This will improve documentation, though require fair amount of module tidyup
+
+# Possible Future Enhancements
+##############################
+
+# 1) Don't allow empty options for choices, aliases, etc
+# 2) If type: bool ensure choices isn't set - perhaps use Exclusive
+# 3) both version_added should be quoted floats
+
+# Tool that takes JSON and generates RETURN skeleton (needs to support complex structures)
diff --git a/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py
new file mode 100644
index 00000000..939ae651
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/utils.py
@@ -0,0 +1,218 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 Matt Martz <matt@sivel.net>
+# Copyright (C) 2015 Rackspace US, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import datetime
+import os
+import re
+import sys
+
+from io import BytesIO, TextIOWrapper
+
+import yaml
+import yaml.reader
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six import string_types
+
+
+class AnsibleTextIOWrapper(TextIOWrapper):
+ def write(self, s):
+ super(AnsibleTextIOWrapper, self).write(to_text(s, self.encoding, errors='replace'))
+
+
+def find_executable(executable, cwd=None, path=None):
+ """Finds the full path to the executable specified"""
+ match = None
+ real_cwd = os.getcwd()
+
+ if not cwd:
+ cwd = real_cwd
+
+ if os.path.dirname(executable):
+ target = os.path.join(cwd, executable)
+ if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
+ match = executable
+ else:
+ path = os.environ.get('PATH', os.path.defpath)
+
+ path_dirs = path.split(os.path.pathsep)
+ seen_dirs = set()
+
+ for path_dir in path_dirs:
+ if path_dir in seen_dirs:
+ continue
+
+ seen_dirs.add(path_dir)
+
+ if os.path.abspath(path_dir) == real_cwd:
+ path_dir = cwd
+
+ candidate = os.path.join(path_dir, executable)
+
+ if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
+ match = candidate
+ break
+
+ return match
+
+
+def find_globals(g, tree):
+ """Uses AST to find globals in an ast tree"""
+ for child in tree:
+ if hasattr(child, 'body') and isinstance(child.body, list):
+ find_globals(g, child.body)
+ elif isinstance(child, (ast.FunctionDef, ast.ClassDef)):
+ g.add(child.name)
+ continue
+ elif isinstance(child, ast.Assign):
+ try:
+ g.add(child.targets[0].id)
+ except (IndexError, AttributeError):
+ pass
+ elif isinstance(child, ast.Import):
+ g.add(child.names[0].name)
+ elif isinstance(child, ast.ImportFrom):
+ for name in child.names:
+ g_name = name.asname or name.name
+ if g_name == '*':
+ continue
+ g.add(g_name)
+
+
+class CaptureStd():
+ """Context manager to handle capturing stderr and stdout"""
+
+ def __enter__(self):
+ self.sys_stdout = sys.stdout
+ self.sys_stderr = sys.stderr
+ sys.stdout = self.stdout = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stdout.encoding)
+ sys.stderr = self.stderr = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stderr.encoding)
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ sys.stdout = self.sys_stdout
+ sys.stderr = self.sys_stderr
+
+ def get(self):
+ """Return ``(stdout, stderr)``"""
+
+ return self.stdout.buffer.getvalue(), self.stderr.buffer.getvalue()
+
+
+def get_module_name_from_filename(filename, collection):
+ # Calculate the module's name so that relative imports work correctly
+ if collection:
+ # collection is a relative path, example: ansible_collections/my_namespace/my_collection
+ # filename is a relative path, example: plugins/modules/my_module.py
+ path = os.path.join(collection, filename)
+ else:
+ # filename is a relative path, example: lib/ansible/modules/system/ping.py
+ path = os.path.relpath(filename, 'lib')
+
+ name = os.path.splitext(path)[0].replace(os.path.sep, '.')
+
+ return name
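+
+# Illustrative results for the example paths above:
+#   ('lib/ansible/modules/system/ping.py', None)
+#       -> 'ansible.modules.system.ping'
+#   ('plugins/modules/my_module.py', 'ansible_collections/my_namespace/my_collection')
+#       -> 'ansible_collections.my_namespace.my_collection.plugins.modules.my_module'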
+
+
+def parse_yaml(value, lineno, module, name, load_all=False):
+ traces = []
+ errors = []
+ data = None
+
+ if load_all:
+ loader = yaml.safe_load_all
+ else:
+ loader = yaml.safe_load
+
+ try:
+ data = loader(value)
+ if load_all:
+ data = list(data)
+ except yaml.MarkedYAMLError as e:
+ e.problem_mark.line += lineno - 1
+ e.problem_mark.name = '%s.%s' % (module, name)
+ errors.append({
+ 'msg': '%s is not valid YAML' % name,
+ 'line': e.problem_mark.line + 1,
+ 'column': e.problem_mark.column + 1
+ })
+ traces.append(e)
+ except yaml.reader.ReaderError as e:
+ traces.append(e)
+ # TODO: Better line/column detection
+ errors.append({
+ 'msg': ('%s is not valid YAML. Character '
+ '0x%x at position %d.' % (name, e.character, e.position)),
+ 'line': lineno
+ })
+ except yaml.YAMLError as e:
+ traces.append(e)
+ errors.append({
+ 'msg': '%s is not valid YAML: %s: %s' % (name, type(e), e),
+ 'line': lineno
+ })
+
+ return data, errors, traces
+
+
+def is_empty(value):
+ """Evaluate null like values excluding False"""
+ if value is False:
+ return False
+ return not bool(value)
+
+
+def compare_unordered_lists(a, b):
+ """Safe list comparisons
+
+ Supports:
+ - unordered lists
+ - unhashable elements
+ """
+ return len(a) == len(b) and all(x in b for x in a)
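+
+# e.g. compare_unordered_lists([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}]) -> True,
+# even though dicts are unhashable and cannot be sorted or compared via set().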
+
+
+class NoArgsAnsibleModule(AnsibleModule):
+ """AnsibleModule that does not actually load params. This is used to get access to the
+ methods within AnsibleModule without having to fake a bunch of data
+ """
+ def _load_params(self):
+ self.params = {'_ansible_selinux_special_fs': [], '_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False, '_ansible_check_mode': False}
+
+
+def parse_isodate(v, allow_date):
+ if allow_date:
+ if isinstance(v, datetime.date):
+ return v
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD) or YAML date'
+ else:
+ msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
+ if not isinstance(v, string_types):
+ raise ValueError(msg)
+ # From Python 3.7 on, there is datetime.date.fromisoformat(). For older versions,
+ # we have to do this manually.
+ if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', v):
+ raise ValueError(msg)
+ try:
+ return datetime.datetime.strptime(v, '%Y-%m-%d').date()
+ except ValueError:
+ raise ValueError(msg)
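+
+
+# Illustrative: parse_isodate('2020-12-31', allow_date=False) returns
+# datetime.date(2020, 12, 31), while parse_isodate('31.12.2020', allow_date=False)
+# raises ValueError with the message built above.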
diff --git a/test/lib/ansible_test/_data/sanity/yamllint/config/default.yml b/test/lib/ansible_test/_data/sanity/yamllint/config/default.yml
new file mode 100644
index 00000000..45d8b7ad
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/yamllint/config/default.yml
@@ -0,0 +1,19 @@
+extends: default
+
+rules:
+ braces: {max-spaces-inside: 1, level: error}
+ brackets: {max-spaces-inside: 1, level: error}
+ colons: {max-spaces-after: -1, level: error}
+ commas: {max-spaces-after: -1, level: error}
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ empty-lines: {max: 3, level: error}
+ hyphens: {level: error}
+ indentation: disable
+ key-duplicates: enable
+ line-length: disable
+ new-line-at-end-of-file: disable
+ new-lines: {type: unix}
+ trailing-spaces: disable
+ truthy: disable
diff --git a/test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml b/test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml
new file mode 100644
index 00000000..da7e6049
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/yamllint/config/modules.yml
@@ -0,0 +1,19 @@
+extends: default
+
+rules:
+ braces: disable
+ brackets: disable
+ colons: disable
+ commas: disable
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ empty-lines: disable
+ hyphens: disable
+ indentation: disable
+ key-duplicates: enable
+ line-length: disable
+ new-line-at-end-of-file: disable
+ new-lines: {type: unix}
+ trailing-spaces: disable
+ truthy: disable
diff --git a/test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml b/test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml
new file mode 100644
index 00000000..6d418137
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/yamllint/config/plugins.yml
@@ -0,0 +1,19 @@
+extends: default
+
+rules:
+ braces: disable
+ brackets: disable
+ colons: disable
+ commas: disable
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ empty-lines: disable
+ hyphens: disable
+ indentation: disable
+ key-duplicates: disable
+ line-length: disable
+ new-line-at-end-of-file: disable
+ new-lines: {type: unix}
+ trailing-spaces: disable
+ truthy: disable
diff --git a/test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py b/test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py
new file mode 100644
index 00000000..c9cdc19c
--- /dev/null
+++ b/test/lib/ansible_test/_data/sanity/yamllint/yamllinter.py
@@ -0,0 +1,249 @@
+#!/usr/bin/env python
+"""Wrapper around yamllint that supports YAML embedded in Ansible modules."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import json
+import os
+import sys
+
+import yaml
+from yaml.resolver import Resolver
+from yaml.constructor import SafeConstructor
+from yaml.error import MarkedYAMLError
+from _yaml import CParser # pylint: disable=no-name-in-module
+
+from yamllint import linter
+from yamllint.config import YamlLintConfig
+
+
+def main():
+ """Main program body."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ checker = YamlChecker()
+ checker.check(paths)
+ checker.report()
+
+
+class TestConstructor(SafeConstructor):
+ """Yaml Safe Constructor that knows about Ansible tags"""
+
+
+TestConstructor.add_constructor(
+ u'!unsafe',
+ TestConstructor.construct_yaml_str)
+
+
+TestConstructor.add_constructor(
+ u'!vault',
+ TestConstructor.construct_yaml_str)
+
+
+TestConstructor.add_constructor(
+ u'!vault-encrypted',
+ TestConstructor.construct_yaml_str)
+
+
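+# Pairing libyaml's CParser with the Ansible-aware constructor above means files
+# are checked with the same C-based parser Ansible prefers at runtime; parse
+# failures surface as the 'unparsable-with-libyaml' code reported below.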
+class TestLoader(CParser, TestConstructor, Resolver):
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ TestConstructor.__init__(self)
+ Resolver.__init__(self)
+
+
+class YamlChecker:
+ """Wrapper around yamllint that supports YAML embedded in Ansible modules."""
+ def __init__(self):
+ self.messages = []
+
+ def report(self):
+ """Print yamllint report to stdout."""
+ report = dict(
+ messages=self.messages,
+ )
+
+ print(json.dumps(report, indent=4, sort_keys=True))
+
+ def check(self, paths):
+ """
+ :type paths: list[str]
+ """
+ config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config')
+
+ yaml_conf = YamlLintConfig(file=os.path.join(config_path, 'default.yml'))
+ module_conf = YamlLintConfig(file=os.path.join(config_path, 'modules.yml'))
+ plugin_conf = YamlLintConfig(file=os.path.join(config_path, 'plugins.yml'))
+
+ for path in paths:
+ extension = os.path.splitext(path)[1]
+
+ with open(path) as f:
+ contents = f.read()
+
+ if extension in ('.yml', '.yaml'):
+ self.check_yaml(yaml_conf, path, contents)
+ elif extension == '.py':
+ if path.startswith('lib/ansible/modules/') or path.startswith('plugins/modules/'):
+ conf = module_conf
+ else:
+ conf = plugin_conf
+
+ self.check_module(conf, path, contents)
+ else:
+ raise Exception('unsupported extension: %s' % extension)
+
+ def check_yaml(self, conf, path, contents):
+ """
+ :type conf: YamlLintConfig
+ :type path: str
+ :type contents: str
+ """
+ self.check_parsable(path, contents)
+ self.messages += [self.result_to_message(r, path) for r in linter.run(contents, conf, path)]
+
+ def check_module(self, conf, path, contents):
+ """
+ :type conf: YamlLintConfig
+ :type path: str
+ :type contents: str
+ """
+ docs = self.get_module_docs(path, contents)
+
+ for key, value in docs.items():
+ yaml_data = value['yaml']
+ lineno = value['lineno']
+
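+ # Module docs usually open with a newline right after the triple quote;
+ # drop it and bump lineno so reported line numbers match the source file.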
+ if yaml_data.startswith('\n'):
+ yaml_data = yaml_data[1:]
+ lineno += 1
+
+ self.check_parsable(path, yaml_data, lineno)
+
+ messages = list(linter.run(yaml_data, conf, path))
+
+ self.messages += [self.result_to_message(r, path, lineno - 1, key) for r in messages]
+
+ def check_parsable(self, path, contents, lineno=1):
+ """
+ :type path: str
+ :type contents: str
+ :type lineno: int
+ """
+ try:
+ yaml.load(contents, Loader=TestLoader)
+ except MarkedYAMLError as e:
+ self.messages += [{'code': 'unparsable-with-libyaml',
+ 'message': '%s - %s' % (e.args[0], e.args[2]),
+ 'path': path,
+ 'line': e.problem_mark.line + lineno,
+ 'column': e.problem_mark.column + 1,
+ 'level': 'error',
+ }]
+
+ @staticmethod
+ def result_to_message(result, path, line_offset=0, prefix=''):
+ """
+ :type result: any
+ :type path: str
+ :type line_offset: int
+ :type prefix: str
+ :rtype: dict[str, any]
+ """
+ if prefix:
+ prefix = '%s: ' % prefix
+
+ return dict(
+ code=result.rule or result.level,
+ message=prefix + result.desc,
+ path=path,
+ line=result.line + line_offset,
+ column=result.column,
+ level=result.level,
+ )
+
+ def get_module_docs(self, path, contents):
+ """
+ :type path: str
+ :type contents: str
+ :rtype: dict[str, any]
+ """
+ module_doc_types = [
+ 'DOCUMENTATION',
+ 'EXAMPLES',
+ 'RETURN',
+ ]
+
+ docs = {}
+
+ def check_assignment(statement, doc_types=None):
+ """Check the given statement for a documentation assignment."""
+ for target in statement.targets:
+ if not isinstance(target, ast.Name):
+ continue
+
+ if doc_types and target.id not in doc_types:
+ continue
+
+ docs[target.id] = dict(
+ yaml=statement.value.s,
+ lineno=statement.lineno,
+ end_lineno=statement.lineno + len(statement.value.s.splitlines())
+ )
+
+ module_ast = self.parse_module(path, contents)
+
+ if not module_ast:
+ return {}
+
+ is_plugin = path.startswith('lib/ansible/modules/') or path.startswith('lib/ansible/plugins/') or path.startswith('plugins/')
+ is_doc_fragment = path.startswith('lib/ansible/plugins/doc_fragments/') or path.startswith('plugins/doc_fragments/')
+
+ if is_plugin and not is_doc_fragment:
+ for body_statement in module_ast.body:
+ if isinstance(body_statement, ast.Assign):
+ check_assignment(body_statement, module_doc_types)
+ elif is_doc_fragment:
+ for body_statement in module_ast.body:
+ if isinstance(body_statement, ast.ClassDef):
+ for class_statement in body_statement.body:
+ if isinstance(class_statement, ast.Assign):
+ check_assignment(class_statement)
+ else:
+ raise Exception('unsupported path: %s' % path)
+
+ return docs
+
+ def parse_module(self, path, contents):
+ """
+ :type path: str
+ :type contents: str
+ :rtype: ast.Module | None
+ """
+ try:
+ return ast.parse(contents)
+ except SyntaxError as ex:
+ self.messages.append(dict(
+ code='python-syntax-error',
+ message=str(ex),
+ path=path,
+ line=ex.lineno,
+ column=ex.offset,
+ level='error',
+ ))
+ except Exception as ex: # pylint: disable=broad-except
+ self.messages.append(dict(
+ code='python-parse-error',
+ message=str(ex),
+ path=path,
+ line=0,
+ column=0,
+ level='error',
+ ))
+
+ return None
+
+
+if __name__ == '__main__':
+ main()
diff --git a/test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1 b/test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1
new file mode 100644
index 00000000..7e039bb4
--- /dev/null
+++ b/test/lib/ansible_test/_data/setup/ConfigureRemotingForAnsible.ps1
@@ -0,0 +1,453 @@
+#Requires -Version 3.0
+
+# Configure a Windows host for remote management with Ansible
+# -----------------------------------------------------------
+#
+# This script checks the current WinRM (PS Remoting) configuration and makes
+# the necessary changes to allow Ansible to connect, authenticate and
+# execute PowerShell commands.
+#
+# All events are logged to the Windows EventLog, useful for unattended runs.
+#
+# Use option -Verbose in order to see the verbose output messages.
+#
+# Use option -CertValidityDays to specify how long this certificate is valid
+# starting from today. For example, specify -CertValidityDays 3650 to get
+# a certificate valid for 10 years.
+#
+# Use option -ForceNewSSLCert if the system has been SysPrepped and a new
+# SSL Certificate must be forced on the WinRM Listener when re-running this
+# script. This is necessary when a new SID and CN name is created.
+#
+# Use option -EnableCredSSP to enable CredSSP as an authentication option.
+#
+# Use option -DisableBasicAuth to disable basic authentication.
+#
+# Use option -SkipNetworkProfileCheck to skip the network profile check.
+# Without specifying this the script will only run if the device's interfaces
+# are in DOMAIN or PRIVATE zones. Provide this switch if you want to enable
+# WinRM on a device with an interface in PUBLIC zone.
+#
+# Use option -SubjectName to specify the CN name of the certificate. This
+# defaults to the system's hostname and generally should not be specified.
+
+# Written by Trond Hindenes <trond@hindenes.com>
+# Updated by Chris Church <cchurch@ansible.com>
+# Updated by Michael Crilly <mike@autologic.cm>
+# Updated by Anton Ouzounov <Anton.Ouzounov@careerbuilder.com>
+# Updated by Nicolas Simond <contact@nicolas-simond.com>
+# Updated by Dag Wieërs <dag@wieers.com>
+# Updated by Jordan Borean <jborean93@gmail.com>
+# Updated by Erwan Quélin <erwan.quelin@gmail.com>
+# Updated by David Norman <david@dkn.email>
+#
+# Version 1.0 - 2014-07-06
+# Version 1.1 - 2014-11-11
+# Version 1.2 - 2015-05-15
+# Version 1.3 - 2016-04-04
+# Version 1.4 - 2017-01-05
+# Version 1.5 - 2017-02-09
+# Version 1.6 - 2017-04-18
+# Version 1.7 - 2017-11-23
+# Version 1.8 - 2018-02-23
+# Version 1.9 - 2018-09-21
+
+# Support -Verbose option
+[CmdletBinding()]
+
+Param (
+ [string]$SubjectName = $env:COMPUTERNAME,
+ [int]$CertValidityDays = 1095,
+ [switch]$SkipNetworkProfileCheck,
+ $CreateSelfSignedCert = $true,
+ [switch]$ForceNewSSLCert,
+ [switch]$GlobalHttpFirewallAccess,
+ [switch]$DisableBasicAuth = $false,
+ [switch]$EnableCredSSP
+)
+
+Function Write-Log
+{
+ $Message = $args[0]
+ Write-EventLog -LogName Application -Source $EventSource -EntryType Information -EventId 1 -Message $Message
+}
+
+Function Write-VerboseLog
+{
+ $Message = $args[0]
+ Write-Verbose $Message
+ Write-Log $Message
+}
+
+Function Write-HostLog
+{
+ $Message = $args[0]
+ Write-Output $Message
+ Write-Log $Message
+}
+
+Function New-LegacySelfSignedCert
+{
+ Param (
+ [string]$SubjectName,
+ [int]$ValidDays = 1095
+ )
+
+ $hostnonFQDN = $env:computerName
+ $hostFQDN = [System.Net.Dns]::GetHostByName(($env:computerName)).Hostname
+ $SignatureAlgorithm = "SHA256"
+
+ $name = New-Object -COM "X509Enrollment.CX500DistinguishedName.1"
+ $name.Encode("CN=$SubjectName", 0)
+
+ $key = New-Object -COM "X509Enrollment.CX509PrivateKey.1"
+ $key.ProviderName = "Microsoft Enhanced RSA and AES Cryptographic Provider"
+ $key.KeySpec = 1
+ $key.Length = 4096
+ $key.SecurityDescriptor = "D:PAI(A;;0xd01f01ff;;;SY)(A;;0xd01f01ff;;;BA)(A;;0x80120089;;;NS)"
+ $key.MachineContext = 1
+ $key.Create()
+
+ $serverauthoid = New-Object -COM "X509Enrollment.CObjectId.1"
+ $serverauthoid.InitializeFromValue("1.3.6.1.5.5.7.3.1")
+ $ekuoids = New-Object -COM "X509Enrollment.CObjectIds.1"
+ $ekuoids.Add($serverauthoid)
+ $ekuext = New-Object -COM "X509Enrollment.CX509ExtensionEnhancedKeyUsage.1"
+ $ekuext.InitializeEncode($ekuoids)
+
+ $cert = New-Object -COM "X509Enrollment.CX509CertificateRequestCertificate.1"
+ $cert.InitializeFromPrivateKey(2, $key, "")
+ $cert.Subject = $name
+ $cert.Issuer = $cert.Subject
+ $cert.NotBefore = (Get-Date).AddDays(-1)
+ $cert.NotAfter = $cert.NotBefore.AddDays($ValidDays)
+
+ $SigOID = New-Object -ComObject X509Enrollment.CObjectId
+ $SigOID.InitializeFromValue(([Security.Cryptography.Oid]$SignatureAlgorithm).Value)
+
+ [string[]] $AlternativeName += $hostnonFQDN
+ $AlternativeName += $hostFQDN
+ $IAlternativeNames = New-Object -ComObject X509Enrollment.CAlternativeNames
+
+ foreach ($AN in $AlternativeName)
+ {
+ $AltName = New-Object -ComObject X509Enrollment.CAlternativeName
+ $AltName.InitializeFromString(0x3,$AN)
+ $IAlternativeNames.Add($AltName)
+ }
+
+ $SubjectAlternativeName = New-Object -ComObject X509Enrollment.CX509ExtensionAlternativeNames
+ $SubjectAlternativeName.InitializeEncode($IAlternativeNames)
+
+ [String[]]$KeyUsage = ("DigitalSignature", "KeyEncipherment")
+ $KeyUsageObj = New-Object -ComObject X509Enrollment.CX509ExtensionKeyUsage
+ $KeyUsageObj.InitializeEncode([int][Security.Cryptography.X509Certificates.X509KeyUsageFlags]($KeyUsage))
+ $KeyUsageObj.Critical = $true
+
+ $cert.X509Extensions.Add($KeyUsageObj)
+ $cert.X509Extensions.Add($ekuext)
+ $cert.SignatureInformation.HashAlgorithm = $SigOID
+ $CERT.X509Extensions.Add($SubjectAlternativeName)
+ $cert.Encode()
+
+ $enrollment = New-Object -COM "X509Enrollment.CX509Enrollment.1"
+ $enrollment.InitializeFromRequest($cert)
+ $certdata = $enrollment.CreateRequest(0)
+ $enrollment.InstallResponse(2, $certdata, 0, "")
+
+ # extract/return the thumbprint from the generated cert
+ $parsed_cert = New-Object System.Security.Cryptography.X509Certificates.X509Certificate2
+ $parsed_cert.Import([System.Text.Encoding]::UTF8.GetBytes($certdata))
+
+ return $parsed_cert.Thumbprint
+}
+
+Function Enable-GlobalHttpFirewallAccess
+{
+ Write-Verbose "Forcing global HTTP firewall access"
+ # this is a fairly naive implementation; could be more sophisticated about rule matching/collapsing
+ $fw = New-Object -ComObject HNetCfg.FWPolicy2
+
+ # try to find/enable the default rule first
+ $add_rule = $false
+ $matching_rules = $fw.Rules | Where-Object { $_.Name -eq "Windows Remote Management (HTTP-In)" }
+ $rule = $null
+ If ($matching_rules) {
+ If ($matching_rules -isnot [Array]) {
+ Write-Verbose "Editing existing single HTTP firewall rule"
+ $rule = $matching_rules
+ }
+ Else {
+ # try to find one with the All or Public profile first
+ Write-Verbose "Found multiple existing HTTP firewall rules..."
+ $rule = @($matching_rules | Where-Object { $_.Profiles -band 4 })[0]
+
+ If (-not $rule -or $rule -is [Array]) {
+ Write-Verbose "Editing an arbitrary single HTTP firewall rule (multiple existed)"
+ # oh well, just pick the first one
+ $rule = $matching_rules[0]
+ }
+ }
+ }
+
+ If (-not $rule) {
+ Write-Verbose "Creating a new HTTP firewall rule"
+ $rule = New-Object -ComObject HNetCfg.FWRule
+ $rule.Name = "Windows Remote Management (HTTP-In)"
+ $rule.Description = "Inbound rule for Windows Remote Management via WS-Management. [TCP 5985]"
+ $add_rule = $true
+ }
+
+ $rule.Profiles = 0x7FFFFFFF
+ $rule.Protocol = 6
+ $rule.LocalPorts = 5985
+ $rule.RemotePorts = "*"
+ $rule.LocalAddresses = "*"
+ $rule.RemoteAddresses = "*"
+ $rule.Enabled = $true
+ $rule.Direction = 1
+ $rule.Action = 1
+ $rule.Grouping = "Windows Remote Management"
+
+ If ($add_rule) {
+ $fw.Rules.Add($rule)
+ }
+
+ Write-Verbose "HTTP firewall rule $($rule.Name) updated"
+}
+
+# Setup error handling.
+Trap
+{
+ $_
+ Exit 1
+}
+$ErrorActionPreference = "Stop"
+
+# Get the ID and security principal of the current user account
+$myWindowsID=[System.Security.Principal.WindowsIdentity]::GetCurrent()
+$myWindowsPrincipal=new-object System.Security.Principal.WindowsPrincipal($myWindowsID)
+
+# Get the security principal for the Administrator role
+$adminRole=[System.Security.Principal.WindowsBuiltInRole]::Administrator
+
+# Check to see if we are currently running "as Administrator"
+if (-Not $myWindowsPrincipal.IsInRole($adminRole))
+{
+ Write-Output "ERROR: You need elevated Administrator privileges in order to run this script."
+ Write-Output " Start Windows PowerShell by using the Run as Administrator option."
+ Exit 2
+}
+
+$EventSource = $MyInvocation.MyCommand.Name
+If (-Not $EventSource)
+{
+ $EventSource = "Powershell CLI"
+}
+
+If ([System.Diagnostics.EventLog]::Exists('Application') -eq $False -or [System.Diagnostics.EventLog]::SourceExists($EventSource) -eq $False)
+{
+ New-EventLog -LogName Application -Source $EventSource
+}
+
+# Detect PowerShell version.
+If ($PSVersionTable.PSVersion.Major -lt 3)
+{
+ Write-Log "PowerShell version 3 or higher is required."
+ Throw "PowerShell version 3 or higher is required."
+}
+
+# Find and start the WinRM service.
+Write-Verbose "Verifying WinRM service."
+If (!(Get-Service "WinRM"))
+{
+ Write-Log "Unable to find the WinRM service."
+ Throw "Unable to find the WinRM service."
+}
+ElseIf ((Get-Service "WinRM").Status -ne "Running")
+{
+ Write-Verbose "Setting WinRM service to start automatically on boot."
+ Set-Service -Name "WinRM" -StartupType Automatic
+ Write-Log "Set WinRM service to start automatically on boot."
+ Write-Verbose "Starting WinRM service."
+ Start-Service -Name "WinRM" -ErrorAction Stop
+ Write-Log "Started WinRM service."
+
+}
+
+# WinRM should be running; check that we have a PS session config.
+If (!(Get-PSSessionConfiguration -Verbose:$false) -or (!(Get-ChildItem WSMan:\localhost\Listener)))
+{
+ If ($SkipNetworkProfileCheck) {
+ Write-Verbose "Enabling PS Remoting without checking Network profile."
+ Enable-PSRemoting -SkipNetworkProfileCheck -Force -ErrorAction Stop
+ Write-Log "Enabled PS Remoting without checking Network profile."
+ }
+ Else {
+ Write-Verbose "Enabling PS Remoting."
+ Enable-PSRemoting -Force -ErrorAction Stop
+ Write-Log "Enabled PS Remoting."
+ }
+}
+Else
+{
+ Write-Verbose "PS Remoting is already enabled."
+}
+
+# Ensure LocalAccountTokenFilterPolicy is set to 1
+# https://github.com/ansible/ansible/issues/42978
+$token_path = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System"
+$token_prop_name = "LocalAccountTokenFilterPolicy"
+$token_key = Get-Item -Path $token_path
+$token_value = $token_key.GetValue($token_prop_name, $null)
+if ($token_value -ne 1) {
+ Write-Verbose "Setting LocalAccountTOkenFilterPolicy to 1"
+ if ($null -ne $token_value) {
+ Remove-ItemProperty -Path $token_path -Name $token_prop_name
+ }
+ New-ItemProperty -Path $token_path -Name $token_prop_name -Value 1 -PropertyType DWORD > $null
+}
+
+# Make sure there is an SSL listener.
+$listeners = Get-ChildItem WSMan:\localhost\Listener
+If (!($listeners | Where-Object {$_.Keys -like "TRANSPORT=HTTPS"}))
+{
+ # We cannot use New-SelfSignedCertificate on 2012R2 and earlier
+ $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays
+ Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint"
+
+ # Create the hashtables of settings to be used.
+ $valueset = @{
+ Hostname = $SubjectName
+ CertificateThumbprint = $thumbprint
+ }
+
+ $selectorset = @{
+ Transport = "HTTPS"
+ Address = "*"
+ }
+
+ Write-Verbose "Enabling SSL listener."
+ New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset
+ Write-Log "Enabled SSL listener."
+}
+Else
+{
+ Write-Verbose "SSL listener is already active."
+
+ # Force a new SSL cert on the listener if the -ForceNewSSLCert switch was given
+ If ($ForceNewSSLCert)
+ {
+
+ # We cannot use New-SelfSignedCertificate on 2012R2 and earlier
+ $thumbprint = New-LegacySelfSignedCert -SubjectName $SubjectName -ValidDays $CertValidityDays
+ Write-HostLog "Self-signed SSL certificate generated; thumbprint: $thumbprint"
+
+ $valueset = @{
+ CertificateThumbprint = $thumbprint
+ Hostname = $SubjectName
+ }
+
+ # Delete the listener for SSL
+ $selectorset = @{
+ Address = "*"
+ Transport = "HTTPS"
+ }
+ Remove-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset
+
+ # Add new Listener with new SSL cert
+ New-WSManInstance -ResourceURI 'winrm/config/Listener' -SelectorSet $selectorset -ValueSet $valueset
+ }
+}
+
+# Check for basic authentication.
+$basicAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object {$_.Name -eq "Basic"}
+
+If ($DisableBasicAuth)
+{
+ If (($basicAuthSetting.Value) -eq $true)
+ {
+ Write-Verbose "Disabling basic auth support."
+ Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $false
+ Write-Log "Disabled basic auth support."
+ }
+ Else
+ {
+ Write-Verbose "Basic auth is already disabled."
+ }
+}
+Else
+{
+ If (($basicAuthSetting.Value) -eq $false)
+ {
+ Write-Verbose "Enabling basic auth support."
+ Set-Item -Path "WSMan:\localhost\Service\Auth\Basic" -Value $true
+ Write-Log "Enabled basic auth support."
+ }
+ Else
+ {
+ Write-Verbose "Basic auth is already enabled."
+ }
+}
+
+# If EnableCredSSP is set to true, enable CredSSP authentication.
+If ($EnableCredSSP)
+{
+ # Check for CredSSP authentication
+ $credsspAuthSetting = Get-ChildItem WSMan:\localhost\Service\Auth | Where-Object {$_.Name -eq "CredSSP"}
+ If (($credsspAuthSetting.Value) -eq $false)
+ {
+ Write-Verbose "Enabling CredSSP auth support."
+ Enable-WSManCredSSP -role server -Force
+ Write-Log "Enabled CredSSP auth support."
+ }
+}
+
+If ($GlobalHttpFirewallAccess) {
+ Enable-GlobalHttpFirewallAccess
+}
+
+# Configure firewall to allow WinRM HTTPS connections.
+$fwtest1 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS"
+$fwtest2 = netsh advfirewall firewall show rule name="Allow WinRM HTTPS" profile=any
+If ($fwtest1.count -lt 5)
+{
+ Write-Verbose "Adding firewall rule to allow WinRM HTTPS."
+ netsh advfirewall firewall add rule profile=any name="Allow WinRM HTTPS" dir=in localport=5986 protocol=TCP action=allow
+ Write-Log "Added firewall rule to allow WinRM HTTPS."
+}
+ElseIf (($fwtest1.count -ge 5) -and ($fwtest2.count -lt 5))
+{
+ Write-Verbose "Updating firewall rule to allow WinRM HTTPS for any profile."
+ netsh advfirewall firewall set rule name="Allow WinRM HTTPS" new profile=any
+ Write-Log "Updated firewall rule to allow WinRM HTTPS for any profile."
+}
+Else
+{
+ Write-Verbose "Firewall rule already exists to allow WinRM HTTPS."
+}
+
+# Test a remoting connection to localhost, which should work.
+$httpResult = Invoke-Command -ComputerName "localhost" -ScriptBlock {$env:COMPUTERNAME} -ErrorVariable httpError -ErrorAction SilentlyContinue
+$httpsOptions = New-PSSessionOption -SkipCACheck -SkipCNCheck -SkipRevocationCheck
+
+$httpsResult = New-PSSession -UseSSL -ComputerName "localhost" -SessionOption $httpsOptions -ErrorVariable httpsError -ErrorAction SilentlyContinue
+
+If ($httpResult -and $httpsResult)
+{
+ Write-Verbose "HTTP: Enabled | HTTPS: Enabled"
+}
+ElseIf ($httpsResult -and !$httpResult)
+{
+ Write-Verbose "HTTP: Disabled | HTTPS: Enabled"
+}
+ElseIf ($httpResult -and !$httpsResult)
+{
+ Write-Verbose "HTTP: Enabled | HTTPS: Disabled"
+}
+Else
+{
+ Write-Log "Unable to establish an HTTP or HTTPS remoting session."
+ Throw "Unable to establish an HTTP or HTTPS remoting session."
+}
+Write-VerboseLog "PS Remoting has been successfully configured for Ansible."
diff --git a/test/lib/ansible_test/_data/setup/docker.sh b/test/lib/ansible_test/_data/setup/docker.sh
new file mode 100644
index 00000000..c65e8ac5
--- /dev/null
+++ b/test/lib/ansible_test/_data/setup/docker.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -eu
+
+# Required for newer mysql-server packages to install/upgrade on Ubuntu 16.04.
+rm -f /usr/sbin/policy-rc.d
+
+# Improve prompts on remote host for interactive use.
+# shellcheck disable=SC1117
+cat << EOF > ~/.bashrc
+alias ls='ls --color=auto'
+export PS1='\[\e]0;\u@\h: \w\a\]\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
+cd ~/ansible/
+EOF
diff --git a/test/lib/ansible_test/_data/setup/remote.sh b/test/lib/ansible_test/_data/setup/remote.sh
new file mode 100644
index 00000000..35167e21
--- /dev/null
+++ b/test/lib/ansible_test/_data/setup/remote.sh
@@ -0,0 +1,159 @@
+#!/bin/sh
+
+set -eu
+
+platform="$1"
+python_version="$2"
+python_interpreter="python${python_version}"
+
+cd ~/
+
+install_pip () {
+ if ! "${python_interpreter}" -m pip.__main__ --version --disable-pip-version-check 2>/dev/null; then
+ case "${python_version}" in
+ *)
+ pip_bootstrap_url="https://ansible-ci-files.s3.amazonaws.com/ansible-test/get-pip-20.3.4.py"
+ ;;
+ esac
+ curl --silent --show-error "${pip_bootstrap_url}" -o /tmp/get-pip.py
+ "${python_interpreter}" /tmp/get-pip.py --disable-pip-version-check --quiet
+ rm /tmp/get-pip.py
+ fi
+}
+
+if [ "${platform}" = "freebsd" ]; then
+ py_version="$(echo "${python_version}" | tr -d '.')"
+
+ while true; do
+ env ASSUME_ALWAYS_YES=YES pkg bootstrap && \
+ pkg install -q -y \
+ bash \
+ curl \
+ gtar \
+ "python${py_version}" \
+ "py${py_version}-Jinja2" \
+ "py${py_version}-virtualenv" \
+ "py${py_version}-cryptography" \
+ sudo \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+
+ install_pip
+
+ if ! grep '^PermitRootLogin yes$' /etc/ssh/sshd_config > /dev/null; then
+ sed -i '' 's/^# *PermitRootLogin.*$/PermitRootLogin yes/;' /etc/ssh/sshd_config
+ service sshd restart
+ fi
+elif [ "${platform}" = "rhel" ]; then
+ if grep '8\.' /etc/redhat-release; then
+ while true; do
+ yum module install -q -y python36 && \
+ yum install -q -y \
+ gcc \
+ python3-devel \
+ python3-jinja2 \
+ python3-virtualenv \
+ python3-cryptography \
+ iptables \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+ else
+ while true; do
+ yum install -q -y \
+ gcc \
+ python-devel \
+ python-virtualenv \
+ python2-cryptography \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+
+ install_pip
+ fi
+
+ # pin packaging and pyparsing to match the downstream vendored versions
+ "${python_interpreter}" -m pip install packaging==20.4 pyparsing==2.4.7 --disable-pip-version-check
+elif [ "${platform}" = "centos" ]; then
+ while true; do
+ yum install -q -y \
+ gcc \
+ python-devel \
+ python-virtualenv \
+ python2-cryptography \
+ libffi-devel \
+ openssl-devel \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+
+ install_pip
+elif [ "${platform}" = "osx" ]; then
+ while true; do
+ pip install --disable-pip-version-check --quiet \
+ 'virtualenv==16.7.10' \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+elif [ "${platform}" = "aix" ]; then
+ chfs -a size=1G /
+ chfs -a size=4G /usr
+ chfs -a size=1G /var
+ chfs -a size=1G /tmp
+ chfs -a size=2G /opt
+ while true; do
+ yum install -q -y \
+ gcc \
+ libffi-devel \
+ python-jinja2 \
+ python-cryptography \
+ python-pip && \
+ pip install --disable-pip-version-check --quiet virtualenv \
+ && break
+ echo "Failed to install packages. Sleeping before trying again..."
+ sleep 10
+ done
+fi
+
+# Generate our ssh key and add it to our authorized_keys file.
+# We also need to add localhost's server keys to known_hosts.
+
+if [ ! -f "${HOME}/.ssh/id_rsa.pub" ]; then
+ ssh-keygen -m PEM -q -t rsa -N '' -f "${HOME}/.ssh/id_rsa"
+ # newer ssh-keygen PEM output (such as on RHEL 8.1) is not recognized by paramiko
+ touch "${HOME}/.ssh/id_rsa.new"
+ chmod 0600 "${HOME}/.ssh/id_rsa.new"
+ sed 's/\(BEGIN\|END\) PRIVATE KEY/\1 RSA PRIVATE KEY/' "${HOME}/.ssh/id_rsa" > "${HOME}/.ssh/id_rsa.new"
+ mv "${HOME}/.ssh/id_rsa.new" "${HOME}/.ssh/id_rsa"
+ cat "${HOME}/.ssh/id_rsa.pub" >> "${HOME}/.ssh/authorized_keys"
+ chmod 0600 "${HOME}/.ssh/authorized_keys"
+ for key in /etc/ssh/ssh_host_*_key.pub; do
+ pk=$(cat "${key}")
+ echo "localhost ${pk}" >> "${HOME}/.ssh/known_hosts"
+ done
+fi
+
+# Improve prompts on remote host for interactive use.
+# shellcheck disable=SC1117
+cat << EOF > ~/.bashrc
+if ls --color > /dev/null 2>&1; then
+ alias ls='ls --color'
+elif ls -G > /dev/null 2>&1; then
+ alias ls='ls -G'
+fi
+export PS1='\[\e]0;\u@\h: \w\a\]\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
+EOF
+
+# Make sure ~/ansible/ is the starting directory for interactive shells.
+if [ "${platform}" = "osx" ]; then
+ echo "cd ~/ansible/" >> ~/.bashrc
+elif [ "${platform}" = "macos" ] ; then
+ echo "export BASH_SILENCE_DEPRECATION_WARNING=1" >> ~/.bashrc
+ echo "cd ~/ansible/" >> ~/.bashrc
+fi
diff --git a/test/lib/ansible_test/_data/setup/windows-httptester.ps1 b/test/lib/ansible_test/_data/setup/windows-httptester.ps1
new file mode 100644
index 00000000..70bdb332
--- /dev/null
+++ b/test/lib/ansible_test/_data/setup/windows-httptester.ps1
@@ -0,0 +1,228 @@
+<#
+.SYNOPSIS
+Designed to set up a Windows host to connect to the httptester container running
+on the Ansible host. This will update the Windows hosts file and forward the
+local ports to use this connection. The script will continue to run in the
+background until it is deleted.
+
+Run this over SSH with the -R arguments to forward ports 8080 and 8443 to the
+httptester container.
+
+.PARAMETER Hosts
+A list of hostnames, delimited by '|', to add to the Windows hosts file for the
+httptester container, e.g. 'ansible.host.com|secondary.host.test'.
+#>
+[CmdletBinding()]
+param(
+ [Parameter(Mandatory=$true, Position=0)][String]$Hosts
+)
+[String[]]$Hosts = $Hosts.Split('|')  # re-cast as an array; the [String] parameter constraint would otherwise flatten the split result back into a single string
+
+$ProgressPreference = "SilentlyContinue"
+$ErrorActionPreference = "Stop"
+$os_version = [Version](Get-Item -Path "$env:SystemRoot\System32\kernel32.dll").VersionInfo.ProductVersion
+Write-Verbose -Message "Configuring HTTP Tester on Windows $os_version for '$($Hosts -join "', '")'"
+
+Function Get-PmapperRuleBytes {
+ <#
+ .SYNOPSIS
+ Create the byte values that configure a rule in the PMapper configuration
+ file. The format isn't really documented, but because PMapper is only used
+ for Server 2008 R2 we stick to one version and live with the legacy
+ workaround for now.
+
+ .PARAMETER ListenPort
+ The localhost port to listen on; traffic will be forwarded to the host
+ defined by ConnectAddress and ConnectPort.
+
+ .PARAMETER ConnectAddress
+ The hostname or IP to map the traffic to.
+
+ .PARAMETER ConnectPort
+ The port on ConnectAddress to map the traffic to.
+ #>
+ param(
+ [Parameter(Mandatory=$true)][UInt16]$ListenPort,
+ [Parameter(Mandatory=$true)][String]$ConnectAddress,
+ [Parameter(Mandatory=$true)][Int]$ConnectPort
+ )
+
+ $connect_field = "$($ConnectAddress):$ConnectPort"
+ $connect_bytes = [System.Text.Encoding]::ASCII.GetBytes($connect_field)
+ $data_length = [byte]($connect_bytes.Length + 6) # size of payload minus header, length, and footer
+ $port_bytes = [System.BitConverter]::GetBytes($ListenPort)
+
+ $payload = [System.Collections.Generic.List`1[Byte]]@()
+ $payload.Add([byte]16) > $null # header is \x10, means Configure Mapping rule
+ $payload.Add($data_length) > $null
+ $payload.AddRange($connect_bytes)
+ $payload.AddRange($port_bytes)
+ $payload.AddRange([byte[]]@(0, 0)) # 2 extra bytes of padding
+ $payload.Add([byte]0) > $null # 0 is TCP, 1 is UDP
+ $payload.Add([byte]0) > $null # 0 is Any, 1 is Internet
+ $payload.Add([byte]31) > $null # footer is \x1f, means end of Configure Mapping rule
+
+ return ,$payload.ToArray()
+}
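+
+# Worked example (illustrative, not from the original source): a rule forwarding localhost:80
+# to 127.0.0.1:8080 yields 23 bytes: 0x10 (header), 0x14 (data length: 14 address bytes + 6),
+# "127.0.0.1:8080" in ASCII, 0x50 0x00 (port 80 as a little-endian UInt16), 0x00 0x00 (padding),
+# 0x00 (TCP), 0x00 (Any), 0x1F (footer):
+#   $example_rule = Get-PmapperRuleBytes -ListenPort 80 -ConnectAddress "127.0.0.1" -ConnectPort 8080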
+
+Write-Verbose -Message "Adding host file entries"
+$hosts_file = "$env:SystemRoot\System32\drivers\etc\hosts"
+$hosts_file_lines = [System.IO.File]::ReadAllLines($hosts_file)
+$changed = $false
+foreach ($httptester_host in $Hosts) {
+ $host_line = "127.0.0.1 $httptester_host # ansible-test httptester"
+ if ($host_line -notin $hosts_file_lines) {
+ $hosts_file_lines += $host_line
+ $changed = $true
+ }
+}
+if ($changed) {
+ Write-Verbose -Message "Host file is missing entries, adding missing entries"
+ [System.IO.File]::WriteAllLines($hosts_file, $hosts_file_lines)
+}
+
+# forward ports
+$forwarded_ports = @{
+ 80 = 8080
+ 443 = 8443
+}
+if ($os_version -ge [Version]"6.2") {
+ Write-Verbose -Message "Using netsh to configure forwarded ports"
+ foreach ($forwarded_port in $forwarded_ports.GetEnumerator()) {
+ $port_set = netsh interface portproxy show v4tov4 | `
+ Where-Object { $_ -match "127.0.0.1\s*$($forwarded_port.Key)\s*127.0.0.1\s*$($forwarded_port.Value)" }
+
+ if (-not $port_set) {
+ Write-Verbose -Message "Adding netsh portproxy rule for $($forwarded_port.Key) -> $($forwarded_port.Value)"
+ $add_args = @(
+ "interface",
+ "portproxy",
+ "add",
+ "v4tov4",
+ "listenaddress=127.0.0.1",
+ "listenport=$($forwarded_port.Key)",
+ "connectaddress=127.0.0.1",
+ "connectport=$($forwarded_port.Value)"
+ )
+ $null = netsh $add_args 2>&1
+ }
+ }
+} else {
+ Write-Verbose -Message "Using Port Mapper to configure forwarded ports"
+ # netsh interface portproxy doesn't work on local addresses in older
+ # versions of Windows. Use the custom application Port Mapper to achieve
+ # the same outcome.
+ # http://www.analogx.com/contents/download/Network/pmapper/Freeware.htm
+ $s3_url = "https://ansible-ci-files.s3.amazonaws.com/ansible-test/pmapper-1.04.exe"
+
+ # download the Port Mapper executable to a temporary directory
+ $pmapper_folder = Join-Path -Path ([System.IO.Path]::GetTempPath()) -ChildPath ([System.IO.Path]::GetRandomFileName())
+ $pmapper_exe = Join-Path -Path $pmapper_folder -ChildPath pmapper.exe
+ $pmapper_config = Join-Path -Path $pmapper_folder -ChildPath pmapper.dat
+ New-Item -Path $pmapper_folder -ItemType Directory > $null
+
+ $stop = $false
+ do {
+ try {
+ Write-Verbose -Message "Attempting download of '$s3_url'"
+ (New-Object -TypeName System.Net.WebClient).DownloadFile($s3_url, $pmapper_exe)
+ $stop = $true
+ } catch { Start-Sleep -Seconds 5 }
+ } until ($stop)
+
+ # create the Port Mapper rule file that contains our forwarded ports
+ $fs = [System.IO.File]::Create($pmapper_config)
+ try {
+ foreach ($forwarded_port in $forwarded_ports.GetEnumerator()) {
+ Write-Verbose -Message "Creating forwarded port rule for $($forwarded_port.Key) -> $($forwarded_port.Value)"
+ $pmapper_rule = Get-PmapperRuleBytes -ListenPort $forwarded_port.Key -ConnectAddress 127.0.0.1 -ConnectPort $forwarded_port.Value
+ $fs.Write($pmapper_rule, 0, $pmapper_rule.Length)
+ }
+ } finally {
+ $fs.Close()
+ }
+
+ Write-Verbose -Message "Starting Port Mapper '$pmapper_exe' in the background"
+ $start_args = @{
+ CommandLine = $pmapper_exe
+ CurrentDirectory = $pmapper_folder
+ }
+ $res = Invoke-CimMethod -ClassName Win32_Process -MethodName Create -Arguments $start_args
+ if ($res.ReturnValue -ne 0) {
+ $error_msg = switch($res.ReturnValue) {
+ 2 { "Access denied" }
+ 3 { "Insufficient privilege" }
+ 8 { "Unknown failure" }
+ 9 { "Path not found" }
+ 21 { "Invalid parameter" }
+ default { "Undefined Error: $($res.ReturnValue)" }
+ }
+ Write-Error -Message "Failed to start pmapper: $error_msg"
+ }
+ $pmapper_pid = $res.ProcessId
+ Write-Verbose -Message "Port Mapper PID: $pmapper_pid"
+}
+
+Write-Verbose -Message "Wait for current script at '$PSCommandPath' to be deleted before running cleanup"
+$fsw = New-Object -TypeName System.IO.FileSystemWatcher
+$fsw.Path = Split-Path -Path $PSCommandPath -Parent
+$fsw.Filter = Split-Path -Path $PSCommandPath -Leaf
+$fsw.WaitForChanged([System.IO.WatcherChangeTypes]::Deleted, 3600000) > $null
+Write-Verbose -Message "Script delete or timeout reached, cleaning up Windows httptester artifacts"
+
+Write-Verbose -Message "Cleanup host file entries"
+$hosts_file_lines = [System.IO.File]::ReadAllLines($hosts_file)
+$new_lines = [System.Collections.ArrayList]@()
+$changed = $false
+foreach ($host_line in $hosts_file_lines) {
+ if ($host_line.EndsWith("# ansible-test httptester")) {
+ $changed = $true
+ continue
+ }
+ $new_lines.Add($host_line) > $null
+}
+if ($changed) {
+ Write-Verbose -Message "Host file has extra entries, removing extra entries"
+ [System.IO.File]::WriteAllLines($hosts_file, $new_lines)
+}
+
+if ($os_version -ge [Version]"6.2") {
+ Write-Verbose -Message "Cleanup of forwarded port configured in netsh"
+ foreach ($forwarded_port in $forwarded_ports.GetEnumerator()) {
+ $port_set = netsh interface portproxy show v4tov4 | `
+ Where-Object { $_ -match "127.0.0.1\s*$($forwarded_port.Key)\s*127.0.0.1\s*$($forwarded_port.Value)" }
+
+ if ($port_set) {
+ Write-Verbose -Message "Removing netsh portproxy rule for $($forwarded_port.Key) -> $($forwarded_port.Value)"
+ $delete_args = @(
+ "interface",
+ "portproxy",
+ "delete",
+ "v4tov4",
+ "listenaddress=127.0.0.1",
+ "listenport=$($forwarded_port.Key)"
+ )
+ $null = netsh $delete_args 2>&1
+ }
+ }
+} else {
+ Write-Verbose -Message "Stopping Port Mapper executable based on pid $pmapper_pid"
+ Stop-Process -Id $pmapper_pid -Force
+
+ # the process may not stop straight away, try multiple times to delete the Port Mapper folder
+ $attempts = 1
+ do {
+ try {
+ Write-Verbose -Message "Cleanup temporary files for Port Mapper at '$pmapper_folder' - Attempt: $attempts"
+ Remove-Item -Path $pmapper_folder -Force -Recurse
+ break
+ } catch {
+ Write-Verbose -Message "Cleanup temporary files for Port Mapper failed, waiting 5 seconds before trying again:$($_ | Out-String)"
+ if ($attempts -ge 5) {
+ break
+ }
+ $attempts += 1
+ Start-Sleep -Second 5
+ }
+ } until ($true)
+}
diff --git a/test/lib/ansible_test/_data/sslcheck.py b/test/lib/ansible_test/_data/sslcheck.py
new file mode 100755
index 00000000..37b82279
--- /dev/null
+++ b/test/lib/ansible_test/_data/sslcheck.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+"""Show openssl version."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+# noinspection PyBroadException
+try:
+ from ssl import OPENSSL_VERSION_INFO
+ VERSION = list(OPENSSL_VERSION_INFO[:3])
+except Exception: # pylint: disable=broad-except
+ VERSION = None
+
+
+def main():
+ """Main program entry point."""
+ print(json.dumps(dict(
+ version=VERSION,
+ )))
+
+
+if __name__ == '__main__':
+ main()
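+
+# Example output (illustrative): {"version": [1, 1, 1]} for an interpreter linked against
+# OpenSSL 1.1.1, or {"version": null} when the ssl module is unavailable.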
diff --git a/test/lib/ansible_test/_data/versions.py b/test/lib/ansible_test/_data/versions.py
new file mode 100755
index 00000000..4babef01
--- /dev/null
+++ b/test/lib/ansible_test/_data/versions.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+"""Show python and pip versions."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import warnings
+
+warnings.simplefilter('ignore') # avoid python version deprecation warnings when using newer pip dependencies
+
+try:
+ import pip
+except ImportError:
+ pip = None
+
+print(sys.version)
+
+if pip:
+ print('pip %s from %s' % (pip.__version__, os.path.dirname(pip.__file__)))
diff --git a/test/lib/ansible_test/_data/virtualenvcheck.py b/test/lib/ansible_test/_data/virtualenvcheck.py
new file mode 100755
index 00000000..552b6e7d
--- /dev/null
+++ b/test/lib/ansible_test/_data/virtualenvcheck.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+"""Detect the real python interpreter when running in a virtual environment created by the 'virtualenv' module."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+try:
+ from sys import real_prefix
+except ImportError:
+ real_prefix = None
+
+print(json.dumps(dict(
+ real_prefix=real_prefix,
+)))
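+
+# Example output (illustrative): {"real_prefix": null} outside a virtualenv; inside an
+# environment created by the 'virtualenv' module, real_prefix is the path of the real
+# interpreter's prefix, which ansible-test uses to locate the underlying python.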
diff --git a/test/lib/ansible_test/_data/yamlcheck.py b/test/lib/ansible_test/_data/yamlcheck.py
new file mode 100755
index 00000000..591842f4
--- /dev/null
+++ b/test/lib/ansible_test/_data/yamlcheck.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+"""Show python and pip versions."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+try:
+ import yaml
+except ImportError:
+ yaml = None
+
+try:
+ from yaml import CLoader
+except ImportError:
+ CLoader = None
+
+print(json.dumps(dict(
+ yaml=bool(yaml),
+ cloader=bool(CLoader),
+)))
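+
+# Example output (illustrative): {"yaml": true, "cloader": false} means PyYAML is installed
+# but without libyaml support, which check_pyyaml() reports as a slow-parser warning.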
diff --git a/test/lib/ansible_test/_internal/__init__.py b/test/lib/ansible_test/_internal/__init__.py
new file mode 100644
index 00000000..35f04422
--- /dev/null
+++ b/test/lib/ansible_test/_internal/__init__.py
@@ -0,0 +1,3 @@
+"""Support code for Ansible testing infrastructure."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/test/lib/ansible_test/_internal/ansible_util.py b/test/lib/ansible_test/_internal/ansible_util.py
new file mode 100644
index 00000000..c1cf8552
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ansible_util.py
@@ -0,0 +1,296 @@
+"""Miscellaneous utility functions and classes specific to ansible cli tools."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from . import types as t
+
+from .constants import (
+ SOFT_RLIMIT_NOFILE,
+)
+
+from .io import (
+ write_text_file,
+)
+
+from .util import (
+ common_environment,
+ display,
+ find_python,
+ ApplicationError,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_SOURCE_ROOT,
+ get_ansible_version,
+)
+
+from .util_common import (
+ create_temp_dir,
+ run_command,
+ ResultType,
+)
+
+from .config import (
+ IntegrationConfig,
+ PosixIntegrationConfig,
+ EnvironmentConfig,
+ CommonConfig,
+)
+
+from .data import (
+ data_context,
+)
+
+CHECK_YAML_VERSIONS = {}
+
+
+def ansible_environment(args, color=True, ansible_config=None):
+ """
+ :type args: CommonConfig
+ :type color: bool
+ :type ansible_config: str | None
+ :rtype: dict[str, str]
+ """
+ env = common_environment()
+ path = env['PATH']
+
+ if not path.startswith(ANSIBLE_BIN_PATH + os.path.pathsep):
+ path = ANSIBLE_BIN_PATH + os.path.pathsep + path
+
+ if not ansible_config:
+ # use the default empty configuration unless one has been provided
+ ansible_config = args.get_ansible_config()
+
+ if not args.explain and not os.path.exists(ansible_config):
+ raise ApplicationError('Configuration not found: %s' % ansible_config)
+
+ ansible = dict(
+ ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE=str(SOFT_RLIMIT_NOFILE),
+ ANSIBLE_FORCE_COLOR='true' if args.color and color else 'false',
+ ANSIBLE_FORCE_HANDLERS='true', # allow cleanup handlers to run when tests fail
+ ANSIBLE_HOST_PATTERN_MISMATCH='error', # prevent tests from unintentionally passing when hosts are not found
+ ANSIBLE_INVENTORY='/dev/null', # force tests to provide inventory
+ ANSIBLE_DEPRECATION_WARNINGS='false',
+ ANSIBLE_HOST_KEY_CHECKING='false',
+ ANSIBLE_RETRY_FILES_ENABLED='false',
+ ANSIBLE_CONFIG=ansible_config,
+ ANSIBLE_LIBRARY='/dev/null',
+ ANSIBLE_DEVEL_WARNING='false', # Don't show warnings that CI is running devel
+ PYTHONPATH=get_ansible_python_path(args),
+ PAGER='/bin/cat',
+ PATH=path,
+ # give TQM worker processes time to report code coverage results
+ # without this the last task in a play may write no coverage file, an empty file, or an incomplete file
+ # enabled even when not using code coverage to surface warnings when worker processes do not exit cleanly
+ ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT='100',
+ ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY='0.1',
+ )
+
+ if isinstance(args, IntegrationConfig) and args.coverage:
+ # standard path injection is not effective for ansible-connection, instead the location must be configured
+ # ansible-connection only requires the injector for code coverage
+ # the correct python interpreter is already selected using the sys.executable used to invoke ansible
+ ansible.update(dict(
+ ANSIBLE_CONNECTION_PATH=os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector', 'ansible-connection'),
+ ))
+
+ if isinstance(args, PosixIntegrationConfig):
+ ansible.update(dict(
+ ANSIBLE_PYTHON_INTERPRETER='/set/ansible_python_interpreter/in/inventory', # force tests to set ansible_python_interpreter in inventory
+ ))
+
+ env.update(ansible)
+
+ if args.debug:
+ env.update(dict(
+ ANSIBLE_DEBUG='true',
+ ANSIBLE_LOG_PATH=os.path.join(ResultType.LOGS.name, 'debug.log'),
+ ))
+
+ if data_context().content.collection:
+ env.update(dict(
+ ANSIBLE_COLLECTIONS_PATH=data_context().content.collection.root,
+ ))
+
+ if data_context().content.is_ansible:
+ env.update(configure_plugin_paths(args))
+
+ return env
+
+
+def configure_plugin_paths(args): # type: (CommonConfig) -> t.Dict[str, str]
+ """Return environment variables with paths to plugins relevant for the current command."""
+ if not isinstance(args, IntegrationConfig):
+ return {}
+
+ support_path = os.path.join(ANSIBLE_SOURCE_ROOT, 'test', 'support', args.command)
+
+ # provide private copies of collections for integration tests
+ collection_root = os.path.join(support_path, 'collections')
+
+ env = dict(
+ ANSIBLE_COLLECTIONS_PATH=collection_root,
+ )
+
+ # provide private copies of plugins for integration tests
+ plugin_root = os.path.join(support_path, 'plugins')
+
+ plugin_list = [
+ 'action',
+ 'become',
+ 'cache',
+ 'callback',
+ 'cliconf',
+ 'connection',
+ 'filter',
+ 'httpapi',
+ 'inventory',
+ 'lookup',
+ 'netconf',
+ # 'shell' is not configurable
+ 'strategy',
+ 'terminal',
+ 'test',
+ 'vars',
+ ]
+
+ # most plugins follow a standard naming convention
+ plugin_map = dict(('%s_plugins' % name, name) for name in plugin_list)
+
+ # these plugins do not follow the standard naming convention
+ plugin_map.update(
+ doc_fragment='doc_fragments',
+ library='modules',
+ module_utils='module_utils',
+ )
+
+ env.update(dict(('ANSIBLE_%s' % key.upper(), os.path.join(plugin_root, value)) for key, value in plugin_map.items()))
+
+ # only configure directories which exist
+ env = dict((key, value) for key, value in env.items() if os.path.isdir(value))
+
+ return env
+
+
+def get_ansible_python_path(args): # type: (CommonConfig) -> str
+ """
+ Return a directory usable for PYTHONPATH, containing only the ansible package.
+ If a temporary directory is required, it will be cached for the lifetime of the process and cleaned up at exit.
+ """
+ try:
+ return get_ansible_python_path.python_path
+ except AttributeError:
+ pass
+
+ if ANSIBLE_SOURCE_ROOT:
+ # when running from source there is no need for a temporary directory to isolate the ansible package
+ python_path = os.path.dirname(ANSIBLE_LIB_ROOT)
+ else:
+ # when not running from source the installed directory is unsafe to add to PYTHONPATH
+ # doing so would expose many unwanted packages on sys.path
+ # instead a temporary directory is created which contains only ansible using a symlink
+ python_path = create_temp_dir(prefix='ansible-test-')
+
+ os.symlink(ANSIBLE_LIB_ROOT, os.path.join(python_path, 'ansible'))
+
+ if not args.explain:
+ generate_egg_info(python_path)
+
+ get_ansible_python_path.python_path = python_path
+
+ return python_path
+
+
+def generate_egg_info(path): # type: (str) -> None
+ """Generate an egg-info in the specified base directory."""
+ # minimal PKG-INFO stub following the format defined in PEP 241
+ # required for older setuptools versions to avoid a traceback when importing pkg_resources from packages like cryptography
+ # newer setuptools versions are happy with an empty directory
+ # including a stub here means we don't need to locate the existing file or have setup.py generate it when running from source
+ pkg_info = '''
+Metadata-Version: 1.0
+Name: ansible
+Version: %s
+Platform: UNKNOWN
+Summary: Radically simple IT automation
+Author-email: info@ansible.com
+License: GPLv3+
+''' % get_ansible_version()
+
+ pkg_info_path = os.path.join(path, 'ansible_base.egg-info', 'PKG-INFO')
+
+ if os.path.exists(pkg_info_path):
+ return
+
+ write_text_file(pkg_info_path, pkg_info.lstrip(), create_directories=True)
+
+
+def check_pyyaml(args, version, required=True, quiet=False):
+ """
+ :type args: EnvironmentConfig
+ :type version: str
+ :type required: bool
+ :type quiet: bool
+ """
+ try:
+ return CHECK_YAML_VERSIONS[version]
+ except KeyError:
+ pass
+
+ python = find_python(version)
+ stdout, _dummy = run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'yamlcheck.py')],
+ capture=True, always=True)
+
+ result = json.loads(stdout)
+
+ yaml = result['yaml']
+ cloader = result['cloader']
+
+ if yaml or required:
+ # results are cached only if pyyaml is required or present
+ # it is assumed that tests will not uninstall/re-install pyyaml -- if they do, those changes will go undetected
+ CHECK_YAML_VERSIONS[version] = result
+
+ if not quiet:
+ if not yaml and required:
+ display.warning('PyYAML is not installed for interpreter: %s' % python)
+ elif not cloader:
+ display.warning('PyYAML will be slow due to installation without libyaml support for interpreter: %s' % python)
+
+ return result
+
+
+class CollectionDetail:
+ """Collection detail."""
+ def __init__(self): # type: () -> None
+ self.version = None # type: t.Optional[str]
+
+
+class CollectionDetailError(ApplicationError):
+ """An error occurred retrieving collection detail."""
+ def __init__(self, reason): # type: (str) -> None
+ super(CollectionDetailError, self).__init__('Error collecting collection detail: %s' % reason)
+ self.reason = reason
+
+
+def get_collection_detail(args, python): # type: (EnvironmentConfig, str) -> CollectionDetail
+ """Return collection detail."""
+ collection = data_context().content.collection
+ directory = os.path.join(collection.root, collection.directory)
+
+ stdout = run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'collection_detail.py'), directory], capture=True, always=True)[0]
+ result = json.loads(stdout)
+ error = result.get('error')
+
+ if error:
+ raise CollectionDetailError(error)
+
+ version = result.get('version')
+
+ detail = CollectionDetail()
+ detail.version = str(version) if version is not None else None
+
+ return detail
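+
+# Illustrative payloads from collection_detail.py (shape inferred from the handling above,
+# not an exhaustive contract): {"version": "1.2.3"} populates CollectionDetail.version,
+# while {"error": "..."} raises CollectionDetailError.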
diff --git a/test/lib/ansible_test/_internal/cache.py b/test/lib/ansible_test/_internal/cache.py
new file mode 100644
index 00000000..85fdbb1f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cache.py
@@ -0,0 +1,35 @@
+"""Cache for commonly shared data that is intended to be immutable."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class CommonCache:
+ """Common cache."""
+ def __init__(self, args):
+ """
+ :param args: CommonConfig
+ """
+ self.args = args
+
+ def get(self, key, factory):
+ """
+ :param key: str
+ :param factory: () -> any
+ :rtype: any
+ """
+ if key not in self.args.cache:
+ self.args.cache[key] = factory()
+
+ return self.args.cache[key]
+
+ def get_with_args(self, key, factory):
+ """
+ :param key: str
+ :param factory: (CommonConfig) -> any
+ :rtype: any
+ """
+
+ if key not in self.args.cache:
+ self.args.cache[key] = factory(self.args)
+
+ return self.args.cache[key]
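+
+# Minimal usage sketch (hypothetical key and factory; any config object carrying a dict
+# 'cache' attribute works):
+#   cache = CommonCache(args)
+#   targets = cache.get('integration_targets', lambda: load_integration_targets())
+# The factory runs only on the first call for a given key; later calls return the cached value.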
diff --git a/test/lib/ansible_test/_internal/ci/__init__.py b/test/lib/ansible_test/_internal/ci/__init__.py
new file mode 100644
index 00000000..d6e2ad6e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/__init__.py
@@ -0,0 +1,227 @@
+"""Support code for CI environments."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import base64
+import json
+import os
+import tempfile
+
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+ to_text,
+)
+
+from ..io import (
+ read_text_file,
+ write_text_file,
+)
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..util import (
+ ABC,
+ ApplicationError,
+ display,
+ get_subclasses,
+ import_plugins,
+ raw_command,
+)
+
+
+class ChangeDetectionNotSupported(ApplicationError):
+ """Exception for cases where change detection is not supported."""
+
+
+class AuthContext:
+ """Context information required for Ansible Core CI authentication."""
+ def __init__(self): # type: () -> None
+ self.region = None # type: t.Optional[str]
+
+
+class CIProvider(ABC):
+ """Base class for CI provider plugins."""
+ priority = 500
+
+ @staticmethod
+ @abc.abstractmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+
+ @property
+ @abc.abstractmethod
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+
+ @property
+ @abc.abstractmethod
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+
+ @abc.abstractmethod
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+
+ @abc.abstractmethod
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+
+ @abc.abstractmethod
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+
+ @abc.abstractmethod
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+
+ @abc.abstractmethod
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+
+ @abc.abstractmethod
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+
+
+def get_ci_provider(): # type: () -> CIProvider
+ """Return a CI provider instance for the current environment."""
+ try:
+ return get_ci_provider.provider
+ except AttributeError:
+ pass
+
+ provider = None
+
+ import_plugins('ci')
+
+ candidates = sorted(get_subclasses(CIProvider), key=lambda c: (c.priority, c.__name__))
+
+ for candidate in candidates:
+ if candidate.is_supported():
+ provider = candidate()
+ break
+
+ if provider.code:
+ display.info('Detected CI provider: %s' % provider.name)
+
+ get_ci_provider.provider = provider
+
+ return provider
+
+
+class AuthHelper(ABC):
+ """Public key based authentication helper for Ansible Core CI."""
+ def sign_request(self, request): # type: (t.Dict[str, t.Any]) -> None
+ """Sign the given auth request and make the public key available."""
+ payload_bytes = to_bytes(json.dumps(request, sort_keys=True))
+ signature_raw_bytes = self.sign_bytes(payload_bytes)
+ signature = to_text(base64.b64encode(signature_raw_bytes))
+
+ request.update(signature=signature)
+
+ def initialize_private_key(self): # type: () -> str
+ """
+ Initialize and publish a new key pair (if needed) and return the private key.
+ The private key is cached across ansible-test invocations so it is only generated and published once per CI job.
+ """
+ path = os.path.expanduser('~/.ansible-core-ci-private.key')
+
+ if os.path.exists(to_bytes(path)):
+ private_key_pem = read_text_file(path)
+ else:
+ private_key_pem = self.generate_private_key()
+ write_text_file(path, private_key_pem)
+
+ return private_key_pem
+
+ @abc.abstractmethod
+ def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+
+ @abc.abstractmethod
+ def publish_public_key(self, public_key_pem): # type: (str) -> None
+ """Publish the given public key."""
+
+ @abc.abstractmethod
+ def generate_private_key(self): # type: () -> str
+ """Generate a new key pair, publishing the public key and returning the private key."""
+
+
+class CryptographyAuthHelper(AuthHelper, ABC): # pylint: disable=abstract-method
+ """Cryptography based public key based authentication helper for Ansible Core CI."""
+ def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+ # import cryptography here to avoid overhead and failures in environments which do not use/provide it
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives.asymmetric import ec
+ from cryptography.hazmat.primitives.serialization import load_pem_private_key
+
+ private_key_pem = self.initialize_private_key()
+ private_key = load_pem_private_key(to_bytes(private_key_pem), None, default_backend())
+
+ signature_raw_bytes = private_key.sign(payload_bytes, ec.ECDSA(hashes.SHA256()))
+
+ return signature_raw_bytes
+
+ def generate_private_key(self): # type: () -> str
+ """Generate a new key pair, publishing the public key and returning the private key."""
+ # import cryptography here to avoid overhead and failures in environments which do not use/provide it
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import serialization
+ from cryptography.hazmat.primitives.asymmetric import ec
+
+ private_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
+ public_key = private_key.public_key()
+
+ private_key_pem = to_text(private_key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.PKCS8,
+ encryption_algorithm=serialization.NoEncryption(),
+ ))
+
+ public_key_pem = to_text(public_key.public_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PublicFormat.SubjectPublicKeyInfo,
+ ))
+
+ self.publish_public_key(public_key_pem)
+
+ return private_key_pem
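+
+ # Verification sketch (not part of ansible-test; how the receiving service could check a
+ # signature, assuming the same ECDSA P-384 / SHA-256 scheme used above):
+ #   from cryptography.hazmat.primitives.serialization import load_pem_public_key
+ #   public_key = load_pem_public_key(to_bytes(public_key_pem), default_backend())
+ #   public_key.verify(signature_raw_bytes, payload_bytes, ec.ECDSA(hashes.SHA256()))
+ # verify() raises cryptography.exceptions.InvalidSignature on mismatch.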
+
+
+class OpenSSLAuthHelper(AuthHelper, ABC): # pylint: disable=abstract-method
+ """OpenSSL based public key based authentication helper for Ansible Core CI."""
+ def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
+ """Sign the given payload and return the signature, initializing a new key pair if required."""
+ private_key_pem = self.initialize_private_key()
+
+ with tempfile.NamedTemporaryFile() as private_key_file:
+ private_key_file.write(to_bytes(private_key_pem))
+ private_key_file.flush()
+
+ with tempfile.NamedTemporaryFile() as payload_file:
+ payload_file.write(payload_bytes)
+ payload_file.flush()
+
+ with tempfile.NamedTemporaryFile() as signature_file:
+ raw_command(['openssl', 'dgst', '-sha256', '-sign', private_key_file.name, '-out', signature_file.name, payload_file.name], capture=True)
+ signature_raw_bytes = signature_file.read()
+
+ return signature_raw_bytes
+
+ def generate_private_key(self): # type: () -> str
+ """Generate a new key pair, publishing the public key and returning the private key."""
+ private_key_pem = raw_command(['openssl', 'ecparam', '-genkey', '-name', 'secp384r1', '-noout'], capture=True)[0]
+ public_key_pem = raw_command(['openssl', 'ec', '-pubout'], data=private_key_pem, capture=True)[0]
+
+ self.publish_public_key(public_key_pem)
+
+ return private_key_pem
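+
+# Equivalent command-line verification (illustrative; ansible-test does not run this):
+#   openssl dgst -sha256 -verify public-key.pem -signature request.sig request.json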
diff --git a/test/lib/ansible_test/_internal/ci/azp.py b/test/lib/ansible_test/_internal/ci/azp.py
new file mode 100644
index 00000000..f2a9d206
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/azp.py
@@ -0,0 +1,268 @@
+"""Support code for working with Azure Pipelines."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import tempfile
+import uuid
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..http import (
+ HttpClient,
+ urlencode,
+)
+
+from ..util import (
+ display,
+ MissingEnvironmentVariable,
+)
+
+from . import (
+ AuthContext,
+ ChangeDetectionNotSupported,
+ CIProvider,
+ CryptographyAuthHelper,
+)
+
+CODE = 'azp'
+
+
+class AzurePipelines(CIProvider):
+ """CI provider implementation for Azure Pipelines."""
+ def __init__(self):
+ self.auth = AzurePipelinesAuthHelper()
+
+ @staticmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+ return os.environ.get('SYSTEM_COLLECTIONURI', '').startswith('https://dev.azure.com/')
+
+ @property
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+ return 'Azure Pipelines'
+
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+ try:
+ prefix = 'azp-%s-%s-%s' % (
+ os.environ['BUILD_BUILDID'],
+ os.environ['SYSTEM_JOBATTEMPT'],
+ os.environ['SYSTEM_JOBIDENTIFIER'],
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ prefix = re.sub(r'[^a-zA-Z0-9]+', '-', prefix).lower()
+
+ return prefix
+
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+ base_branch = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH') or os.environ.get('BUILD_SOURCEBRANCHNAME')
+
+ if base_branch:
+ base_branch = 'origin/%s' % base_branch
+
+ return base_branch or ''
+
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+ result = AzurePipelinesChanges(args)
+
+ if result.is_pr:
+ job_type = 'pull request'
+ else:
+ job_type = 'merge commit'
+
+ display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ if result.paths is None:
+ # There are several likely causes of this:
+ # - First run on a new branch.
+ # - Too many pull requests passed since the last merge run passed.
+ display.warning('No successful commit found. All tests will be executed.')
+
+ return result.paths
+
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+ return True
+
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+ try:
+ request = dict(
+ org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1],
+ project_name=os.environ['SYSTEM_TEAMPROJECT'],
+ build_id=int(os.environ['BUILD_BUILDID']),
+ task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])),
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ self.auth.sign_request(request)
+
+ auth = dict(
+ azp=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+ changes = AzurePipelinesChanges(args)
+
+ details = dict(
+ base_commit=changes.base_commit,
+ commit=changes.commit,
+ )
+
+ return details
+
+
+class AzurePipelinesAuthHelper(CryptographyAuthHelper):
+ """
+ Authentication helper for Azure Pipelines.
+ Based on cryptography since it is provided by the default Azure Pipelines environment.
+ """
+ def publish_public_key(self, public_key_pem): # type: (str) -> None
+ """Publish the given public key."""
+ try:
+ agent_temp_directory = os.environ['AGENT_TEMPDIRECTORY']
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ # the temporary file cannot be deleted because we do not know when the agent has processed it
+ # placing the file in the agent's temp directory allows it to be picked up when the job is running in a container
+ with tempfile.NamedTemporaryFile(prefix='public-key-', suffix='.pem', delete=False, dir=agent_temp_directory) as public_key_file:
+ public_key_file.write(to_bytes(public_key_pem))
+ public_key_file.flush()
+
+ # make the agent aware of the public key by declaring it as an attachment
+ vso_add_attachment('ansible-core-ci', 'public-key.pem', public_key_file.name)
+
+
+class AzurePipelinesChanges:
+ """Change information for an Azure Pipelines build."""
+ def __init__(self, args): # type: (CommonConfig) -> None
+ self.args = args
+ self.git = Git()
+
+ try:
+ self.org_uri = os.environ['SYSTEM_COLLECTIONURI'] # ex: https://dev.azure.com/{org}/
+ self.project = os.environ['SYSTEM_TEAMPROJECT']
+ self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER'] # ex: GitHub
+ self.source_branch = os.environ['BUILD_SOURCEBRANCH']
+ self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
+ self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ if self.source_branch.startswith('refs/tags/'):
+ raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
+
+ self.org = self.org_uri.strip('/').split('/')[-1]
+ self.is_pr = self.pr_branch_name is not None
+
+ if self.is_pr:
+ # HEAD is a merge commit of the PR branch into the target branch
+ # HEAD^1 is HEAD of the target branch (first parent of merge commit)
+ # HEAD^2 is HEAD of the PR branch (second parent of merge commit)
+ # see: https://git-scm.com/docs/gitrevisions
+ self.branch = self.pr_branch_name
+ self.base_commit = 'HEAD^1'
+ self.commit = 'HEAD^2'
+ else:
+ commits = self.get_successful_merge_run_commits()
+
+ self.branch = self.source_branch_name
+ self.base_commit = self.get_last_successful_commit(commits)
+ self.commit = 'HEAD'
+
+ self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
+
+ if self.base_commit:
+ self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
+
+ # <commit>...<commit>
+ # This form is to view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>.
+ # see: https://git-scm.com/docs/git-diff
+ dot_range = '%s...%s' % (self.base_commit, self.commit)
+
+ self.paths = sorted(self.git.get_diff_names([dot_range]))
+ self.diff = self.git.get_diff([dot_range])
+ else:
+ self.paths = None # act as though change detection not enabled, do not filter targets
+ self.diff = []
+
+ def get_successful_merge_run_commits(self): # type: () -> t.Set[str]
+ """Return a set of recent successsful merge commits from Azure Pipelines."""
+ parameters = dict(
+ maxBuildsPerDefinition=100, # max 5000
+ queryOrder='queueTimeDescending', # assumes under normal circumstances that later queued jobs are for later commits
+ resultFilter='succeeded',
+ reasonFilter='batchedCI', # may miss some non-PR reasons, the alternative is to filter the list after receiving it
+ repositoryType=self.repo_type,
+ repositoryId='%s/%s' % (self.org, self.project),
+ )
+
+ url = '%s%s/build/builds?%s' % (self.org_uri, self.project, urlencode(parameters))
+
+ http = HttpClient(self.args)
+ response = http.get(url)
+
+ # noinspection PyBroadException
+ try:
+ result = response.json()
+ except Exception: # pylint: disable=broad-except
+ # most likely due to a private project, which returns an HTTP 203 response with HTML
+ display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
+ return set()
+
+ commits = set(build['sourceVersion'] for build in result['value'])
+
+ return commits
+
+ def get_last_successful_commit(self, commits): # type: (t.Set[str]) -> t.Optional[str]
+ """Return the last successful commit from git history that is found in the given commit list, or None."""
+ commit_history = self.git.get_rev_list(max_count=100)
+ ordered_successful_commits = [commit for commit in commit_history if commit in commits]
+ last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
+ return last_successful_commit
+
+
+def vso_add_attachment(file_type, file_name, path): # type: (str, str, str) -> None
+ """Upload and attach a file to the current timeline record."""
+ vso('task.addattachment', dict(type=file_type, name=file_name), path)
+
+
+def vso(name, data, message): # type: (str, t.Dict[str, str], str) -> None
+ """
+ Write a logging command for the Azure Pipelines agent to process.
+ See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
+ """
+ display.info('##vso[%s %s]%s' % (name, ';'.join('='.join((key, value)) for key, value in data.items()), message))
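+
+# Example logging command emitted via vso_add_attachment (illustrative values):
+#   ##vso[task.addattachment type=ansible-core-ci;name=public-key.pem]/agent/_temp/public-key-1234.pem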
diff --git a/test/lib/ansible_test/_internal/ci/local.py b/test/lib/ansible_test/_internal/ci/local.py
new file mode 100644
index 00000000..5f605c86
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/local.py
@@ -0,0 +1,217 @@
+"""Support code for working without a supported CI provider."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import platform
+import random
+import re
+
+from .. import types as t
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ is_binary_file,
+ SubprocessError,
+)
+
+from . import (
+ AuthContext,
+ CIProvider,
+)
+
+CODE = '' # not really a CI provider, so use an empty string for the code
+
+
+class Local(CIProvider):
+ """CI provider implementation when not using CI."""
+ priority = 1000
+
+ @staticmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+ return True
+
+ @property
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+ return 'Local'
+
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+ node = re.sub(r'[^a-zA-Z0-9]+', '-', platform.node().split('.')[0]).lower()
+
+ prefix = 'ansible-test-%s-%d' % (node, random.randint(10000000, 99999999))
+
+ return prefix
+
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+ return ''
+
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+ result = LocalChanges(args)
+
+ display.info('Detected branch %s forked from %s at commit %s' % (
+ result.current_branch, result.fork_branch, result.fork_point))
+
+ if result.untracked and not args.untracked:
+ display.warning('Ignored %s untracked file(s). Use --untracked to include them.' %
+ len(result.untracked))
+
+ if result.committed and not args.committed:
+ display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' %
+ len(result.committed))
+
+ if result.staged and not args.staged:
+ display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' %
+ len(result.staged))
+
+ if result.unstaged and not args.unstaged:
+ display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' %
+ len(result.unstaged))
+
+ names = set()
+
+ if args.tracked:
+ names |= set(result.tracked)
+ if args.untracked:
+ names |= set(result.untracked)
+ if args.committed:
+ names |= set(result.committed)
+ if args.staged:
+ names |= set(result.staged)
+ if args.unstaged:
+ names |= set(result.unstaged)
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ for path in result.untracked:
+ if is_binary_file(path):
+ args.metadata.changes[path] = ((0, 0),)
+ continue
+
+ line_count = len(read_text_file(path).splitlines())
+
+ args.metadata.changes[path] = ((1, line_count),)
+
+ return sorted(names)
+
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+ path = self._get_aci_key_path(context)
+ return os.path.exists(path)
+
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+ path = self._get_aci_key_path(context)
+ auth_key = read_text_file(path).strip()
+
+ request = dict(
+ key=auth_key,
+ nonce=None,
+ )
+
+ auth = dict(
+ remote=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+ return None # not yet implemented for local
+
+ def _get_aci_key_path(self, context): # type: (AuthContext) -> str
+ path = os.path.expanduser('~/.ansible-core-ci.key')
+
+ if context.region:
+ path += '.%s' % context.region
+
+ return path
+
+
+class InvalidBranch(ApplicationError):
+ """Exception for invalid branch specification."""
+ def __init__(self, branch, reason): # type: (str, str) -> None
+ message = 'Invalid branch: %s\n%s' % (branch, reason)
+
+ super(InvalidBranch, self).__init__(message)
+
+ self.branch = branch
+
+
+class LocalChanges:
+ """Change information for local work."""
+ def __init__(self, args): # type: (TestConfig) -> None
+ self.args = args
+ self.git = Git()
+
+ self.current_branch = self.git.get_branch()
+
+ if self.is_official_branch(self.current_branch):
+ raise InvalidBranch(branch=self.current_branch,
+ reason='Current branch is not a feature branch.')
+
+ self.fork_branch = None
+ self.fork_point = None
+
+ self.local_branches = sorted(self.git.get_branches())
+ self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
+
+ for self.fork_branch in self.official_branches:
+ try:
+ self.fork_point = self.git.get_branch_fork_point(self.fork_branch)
+ break
+ except SubprocessError:
+ pass
+
+ if self.fork_point is None:
+ raise ApplicationError('Unable to auto-detect fork branch and fork point.')
+
+ # tracked files (including unchanged)
+ self.tracked = sorted(self.git.get_file_names(['--cached']))
+ # untracked files (except ignored)
+ self.untracked = sorted(self.git.get_file_names(['--others', '--exclude-standard']))
+ # tracked changes (including deletions) committed since the branch was forked
+ self.committed = sorted(self.git.get_diff_names([self.fork_point, 'HEAD']))
+ # tracked changes (including deletions) which are staged
+ self.staged = sorted(self.git.get_diff_names(['--cached']))
+ # tracked changes (including deletions) which are not staged
+ self.unstaged = sorted(self.git.get_diff_names([]))
+ # diff of all tracked files from fork point to working copy
+ self.diff = self.git.get_diff([self.fork_point])
+
+ def is_official_branch(self, name): # type: (str) -> bool
+ """Return True if the given branch name an official branch for development or releases."""
+ if self.args.base_branch:
+ return name == self.args.base_branch
+
+ if name == 'devel':
+ return True
+
+ if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
+ return True
+
+ return False
diff --git a/test/lib/ansible_test/_internal/ci/shippable.py b/test/lib/ansible_test/_internal/ci/shippable.py
new file mode 100644
index 00000000..f9f0a192
--- /dev/null
+++ b/test/lib/ansible_test/_internal/ci/shippable.py
@@ -0,0 +1,269 @@
+"""Support code for working with Shippable."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import time
+
+from .. import types as t
+
+from ..config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from ..git import (
+ Git,
+)
+
+from ..http import (
+ HttpClient,
+ urlencode,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ MissingEnvironmentVariable,
+ SubprocessError,
+)
+
+from . import (
+ AuthContext,
+ ChangeDetectionNotSupported,
+ CIProvider,
+ OpenSSLAuthHelper,
+)
+
+
+CODE = 'shippable'
+
+
+class Shippable(CIProvider):
+ """CI provider implementation for Shippable."""
+ def __init__(self):
+ self.auth = ShippableAuthHelper()
+
+ @staticmethod
+ def is_supported(): # type: () -> bool
+ """Return True if this provider is supported in the current running environment."""
+ return os.environ.get('SHIPPABLE') == 'true'
+
+ @property
+ def code(self): # type: () -> str
+ """Return a unique code representing this provider."""
+ return CODE
+
+ @property
+ def name(self): # type: () -> str
+ """Return descriptive name for this provider."""
+ return 'Shippable'
+
+ def generate_resource_prefix(self): # type: () -> str
+ """Return a resource prefix specific to this CI provider."""
+ try:
+ prefix = 'shippable-%s-%s' % (
+ os.environ['SHIPPABLE_BUILD_NUMBER'],
+ os.environ['SHIPPABLE_JOB_NUMBER'],
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ return prefix
+
+ def get_base_branch(self): # type: () -> str
+ """Return the base branch or an empty string."""
+ base_branch = os.environ.get('BASE_BRANCH')
+
+ if base_branch:
+ base_branch = 'origin/%s' % base_branch
+
+ return base_branch or ''
+
+ def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
+ """Initialize change detection."""
+ result = ShippableChanges(args)
+
+ if result.is_pr:
+ job_type = 'pull request'
+ elif result.is_tag:
+ job_type = 'tag'
+ else:
+ job_type = 'merge commit'
+
+ display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
+
+ if not args.metadata.changes:
+ args.metadata.populate_changes(result.diff)
+
+ if result.paths is None:
+ # There are several likely causes of this:
+ # - First run on a new branch.
+ # - Too many pull requests passed since the last merge run passed.
+ display.warning('No successful commit found. All tests will be executed.')
+
+ return result.paths
+
+ def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
+ """Return True if Ansible Core CI is supported."""
+ return True
+
+ def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
+ """Return authentication details for Ansible Core CI."""
+ try:
+ request = dict(
+ run_id=os.environ['SHIPPABLE_BUILD_ID'],
+ job_number=int(os.environ['SHIPPABLE_JOB_NUMBER']),
+ )
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ self.auth.sign_request(request)
+
+ auth = dict(
+ shippable=request,
+ )
+
+ return auth
+
+ def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
+ """Return details about git in the current environment."""
+ commit = os.environ.get('COMMIT')
+ base_commit = os.environ.get('BASE_COMMIT')
+
+ details = dict(
+ base_commit=base_commit,
+ commit=commit,
+ merged_commit=self._get_merged_commit(args, commit),
+ )
+
+ return details
+
+ # noinspection PyUnusedLocal
+ def _get_merged_commit(self, args, commit): # type: (CommonConfig, str) -> t.Optional[str] # pylint: disable=unused-argument
+ """Find the merged commit that should be present."""
+ if not commit:
+ return None
+
+ git = Git()
+
+ try:
+ show_commit = git.run_git(['show', '--no-patch', '--no-abbrev', commit])
+ except SubprocessError as ex:
+ # This should only fail for pull requests where the commit does not exist.
+ # Merge runs would fail much earlier when attempting to checkout the commit.
+ raise ApplicationError('Commit %s was not found:\n\n%s\n\n'
+ 'GitHub may not have fully replicated the commit across their infrastructure.\n'
+ 'It is also possible the commit was removed by a force push between job creation and execution.\n'
+ 'Find the latest run for the pull request and restart failed jobs as needed.'
+ % (commit, ex.stderr.strip()))
+
+ head_commit = git.run_git(['show', '--no-patch', '--no-abbrev', 'HEAD'])
+
+ if show_commit == head_commit:
+ # Commit is HEAD, so this is not a pull request or the base branch for the pull request is up-to-date.
+ return None
+
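+        # For pull requests, CI typically checks out GitHub's synthetic merge ref (refs/pull/N/merge),
+        # so HEAD is expected to be a two-parent merge of the pull request head and the base branch.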
+ match_merge = re.search(r'^Merge: (?P<parents>[0-9a-f]{40} [0-9a-f]{40})$', head_commit, flags=re.MULTILINE)
+
+ if not match_merge:
+ # The most likely scenarios resulting in a failure here are:
+            # - A newer run supersedes (or should supersede) this job, but it was not cancelled in time.
+            # - A job was superseded and then later restarted.
+ raise ApplicationError('HEAD is not commit %s or a merge commit:\n\n%s\n\n'
+ 'This job has likely been superseded by another run due to additional commits being pushed.\n'
+ 'Find the latest run for the pull request and restart failed jobs as needed.'
+ % (commit, head_commit.strip()))
+
+ parents = set(match_merge.group('parents').split(' '))
+
+ if len(parents) != 2:
+ raise ApplicationError('HEAD is a %d-way octopus merge.' % len(parents))
+
+ if commit not in parents:
+ raise ApplicationError('Commit %s is not a parent of HEAD.' % commit)
+
+ parents.remove(commit)
+
+ last_commit = parents.pop()
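+        # the remaining parent is the tip of the base branch the pull request commit was merged with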
+
+ return last_commit
+
+
+class ShippableAuthHelper(OpenSSLAuthHelper):
+ """
+ Authentication helper for Shippable.
+    Based on OpenSSL since the cryptography package is not provided by the default Shippable environment.
+ """
+ def publish_public_key(self, public_key_pem): # type: (str) -> None
+ """Publish the given public key."""
+        # display the public key as a single line to avoid mangling, such as when each line is prefixed with a timestamp
+ display.info(public_key_pem.replace('\n', ' '))
+ # allow time for logs to become available to reduce repeated API calls
+ time.sleep(3)
+
+
+class ShippableChanges:
+ """Change information for Shippable build."""
+ def __init__(self, args): # type: (TestConfig) -> None
+ self.args = args
+ self.git = Git()
+
+ try:
+ self.branch = os.environ['BRANCH']
+ self.is_pr = os.environ['IS_PULL_REQUEST'] == 'true'
+ self.is_tag = os.environ['IS_GIT_TAG'] == 'true'
+ self.commit = os.environ['COMMIT']
+ self.project_id = os.environ['PROJECT_ID']
+ self.commit_range = os.environ['SHIPPABLE_COMMIT_RANGE']
+ except KeyError as ex:
+ raise MissingEnvironmentVariable(name=ex.args[0])
+
+ if self.is_tag:
+ raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
+
+ if self.is_pr:
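+            # SHIPPABLE_COMMIT_RANGE is assumed to be a git range of the form 'base...head'
+            # covering the commits which make up the pull request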
+ self.paths = sorted(self.git.get_diff_names([self.commit_range]))
+ self.diff = self.git.get_diff([self.commit_range])
+ else:
+ commits = self.get_successful_merge_run_commits(self.project_id, self.branch)
+ last_successful_commit = self.get_last_successful_commit(commits)
+
+ if last_successful_commit:
+ self.paths = sorted(self.git.get_diff_names([last_successful_commit, self.commit]))
+ self.diff = self.git.get_diff([last_successful_commit, self.commit])
+ else:
+ # first run for branch
+ self.paths = None # act as though change detection not enabled, do not filter targets
+ self.diff = []
+
+ def get_successful_merge_run_commits(self, project_id, branch): # type: (str, str) -> t.Set[str]
+ """Return a set of recent successsful merge commits from Shippable for the given project and branch."""
+ parameters = dict(
+ isPullRequest='false',
+ projectIds=project_id,
+ branch=branch,
+ )
+
+ url = 'https://api.shippable.com/runs?%s' % urlencode(parameters)
+
+ http = HttpClient(self.args, always=True)
+ response = http.get(url)
+ result = response.json()
+
+ if 'id' in result and result['id'] == 4004:
+ # most likely due to a private project, which returns an HTTP 200 response with JSON
+ display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
+ return set()
+
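+        # a statusCode of 30 indicates a successful run in the Shippable API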
+ commits = set(run['commitSha'] for run in result if run['statusCode'] == 30)
+
+ return commits
+
+ def get_last_successful_commit(self, successful_commits): # type: (t.Set[str]) -> t.Optional[str]
+ """Return the last successful commit from git history that is found in the given commit list, or None."""
+ commit_history = self.git.get_rev_list(max_count=100)
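+        # git rev-list returns commits newest-first, so the first match below is the most recent success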
+ ordered_successful_commits = [commit for commit in commit_history if commit in successful_commits]
+ last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
+ return last_successful_commit
diff --git a/test/lib/ansible_test/_internal/classification.py b/test/lib/ansible_test/_internal/classification.py
new file mode 100644
index 00000000..52385d1e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/classification.py
@@ -0,0 +1,977 @@
+"""Classify changes in Ansible code."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import os
+import re
+import time
+
+from . import types as t
+
+from .target import (
+ walk_module_targets,
+ walk_integration_targets,
+ walk_units_targets,
+ walk_compile_targets,
+ walk_sanity_targets,
+ load_integration_prefixes,
+ analyze_integration_target_dependencies,
+)
+
+from .util import (
+ display,
+ is_subdir,
+)
+
+from .import_analysis import (
+ get_python_module_utils_imports,
+ get_python_module_utils_name,
+)
+
+from .csharp_import_analysis import (
+ get_csharp_module_utils_imports,
+ get_csharp_module_utils_name,
+)
+
+from .powershell_import_analysis import (
+ get_powershell_module_utils_imports,
+ get_powershell_module_utils_name,
+)
+
+from .config import (
+ TestConfig,
+ IntegrationConfig,
+)
+
+from .metadata import (
+ ChangeDescription,
+)
+
+from .data import (
+ data_context,
+)
+
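+# sentinel key used in classification results to flag changes which map directly to a specific target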
+FOCUSED_TARGET = '__focused__'
+
+
+def categorize_changes(args, paths, verbose_command=None):
+ """
+ :type args: TestConfig
+ :type paths: list[str]
+ :type verbose_command: str
+ :rtype: ChangeDescription
+ """
+ mapper = PathMapper(args)
+
+ commands = {
+ 'sanity': set(),
+ 'units': set(),
+ 'integration': set(),
+ 'windows-integration': set(),
+ 'network-integration': set(),
+ }
+
+ focused_commands = collections.defaultdict(set)
+
+ deleted_paths = set()
+ original_paths = set()
+ additional_paths = set()
+ no_integration_paths = set()
+
+ for path in paths:
+ if not os.path.exists(path):
+ deleted_paths.add(path)
+ continue
+
+ original_paths.add(path)
+
+ dependent_paths = mapper.get_dependent_paths(path)
+
+ if not dependent_paths:
+ continue
+
+ display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
+
+ for dependent_path in dependent_paths:
+ display.info(dependent_path, verbosity=2)
+ additional_paths.add(dependent_path)
+
+ additional_paths -= set(paths) # don't count changed paths as additional paths
+
+ if additional_paths:
+ display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
+ paths = sorted(set(paths) | additional_paths)
+
+ display.info('Mapping %d changed file(s) to tests.' % len(paths))
+
+ none_count = 0
+
+ for path in paths:
+ tests = mapper.classify(path)
+
+ if tests is None:
+ focused_target = False
+
+ display.info('%s -> all' % path, verbosity=1)
+ tests = all_tests(args) # not categorized, run all tests
+ display.warning('Path not categorized: %s' % path)
+ else:
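+            # only directly changed paths count as focused; paths added by dependency expansion do not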
+ focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths
+
+ tests = dict((key, value) for key, value in tests.items() if value)
+
+ if focused_target and not any('integration' in command for command in tests):
+ no_integration_paths.add(path) # path triggers no integration tests
+
+ if verbose_command:
+ result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
+
+ # identify targeted integration tests (those which only target a single integration command)
+ if 'integration' in verbose_command and tests.get(verbose_command):
+ if not any('integration' in command for command in tests if command != verbose_command):
+ if focused_target:
+ result += ' (focused)'
+
+ result += ' (targeted)'
+ else:
+ result = '%s' % tests
+
+ if not tests.get(verbose_command):
+ # minimize excessive output from potentially thousands of files which do not trigger tests
+ none_count += 1
+ verbosity = 2
+ else:
+ verbosity = 1
+
+ if args.verbosity >= verbosity:
+ display.info('%s -> %s' % (path, result), verbosity=1)
+
+ for command, target in tests.items():
+ commands[command].add(target)
+
+ if focused_target:
+ focused_commands[command].add(target)
+
+ if none_count > 0 and args.verbosity < 2:
+ display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
+
+ for command in commands:
+ commands[command].discard('none')
+
+ if any(target == 'all' for target in commands[command]):
+ commands[command] = set(['all'])
+
+ commands = dict((c, sorted(commands[c])) for c in commands if commands[c])
+ focused_commands = dict((c, sorted(focused_commands[c])) for c in focused_commands)
+
+ for command in commands:
+ if commands[command] == ['all']:
+ commands[command] = [] # changes require testing all targets, do not filter targets
+
+ changes = ChangeDescription()
+ changes.command = verbose_command
+ changes.changed_paths = sorted(original_paths)
+ changes.deleted_paths = sorted(deleted_paths)
+ changes.regular_command_targets = commands
+ changes.focused_command_targets = focused_commands
+ changes.no_integration_paths = sorted(no_integration_paths)
+
+ return changes
+
+
+class PathMapper:
+ """Map file paths to test commands and targets."""
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ self.args = args
+ self.integration_all_target = get_integration_all_target(self.args)
+
+ self.integration_targets = list(walk_integration_targets())
+ self.module_targets = list(walk_module_targets())
+ self.compile_targets = list(walk_compile_targets())
+ self.units_targets = list(walk_units_targets())
+ self.sanity_targets = list(walk_sanity_targets())
+ self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')]
+ self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs']
+
+ self.units_modules = set(target.module for target in self.units_targets if target.module)
+ self.units_paths = set(a for target in self.units_targets for a in target.aliases)
+ self.sanity_paths = set(target.path for target in self.sanity_targets)
+
+ self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets)
+ self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets)
+ self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases)
+
+ self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'posix/' in target.aliases for m in target.modules)
+ self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'windows/' in target.aliases for m in target.modules)
+ self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets
+ if 'network/' in target.aliases for m in target.modules)
+
+ self.prefixes = load_integration_prefixes()
+ self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
+
+ self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed
+ self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed
+ self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed
+
+ self.paths_to_dependent_targets = {}
+
+ for target in self.integration_targets:
+ for path in target.needs_file:
+ if path not in self.paths_to_dependent_targets:
+ self.paths_to_dependent_targets[path] = set()
+
+ self.paths_to_dependent_targets[path].add(target)
+
+ def get_dependent_paths(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
+ paths = set()
+
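+        # expand dependencies breadth-first until a fixed point is reached,
+        # tracking already-seen paths to avoid re-processing cycles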
+ while unprocessed_paths:
+ queued_paths = list(unprocessed_paths)
+ paths |= unprocessed_paths
+ unprocessed_paths = set()
+
+ for queued_path in queued_paths:
+ new_paths = self.get_dependent_paths_non_recursive(queued_path)
+
+ for new_path in new_paths:
+ if new_path not in paths:
+ unprocessed_paths.add(new_path)
+
+ return sorted(paths)
+
+ def get_dependent_paths_non_recursive(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ paths = self.get_dependent_paths_internal(path)
+ paths += [target.path + '/' for target in self.paths_to_dependent_targets.get(path, set())]
+ paths = sorted(set(paths))
+
+ return paths
+
+ def get_dependent_paths_internal(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ ext = os.path.splitext(os.path.split(path)[1])[1]
+
+ if is_subdir(path, data_context().content.module_utils_path):
+ if ext == '.py':
+ return self.get_python_module_utils_usage(path)
+
+ if ext == '.psm1':
+ return self.get_powershell_module_utils_usage(path)
+
+ if ext == '.cs':
+ return self.get_csharp_module_utils_usage(path)
+
+ if is_subdir(path, data_context().content.integration_targets_path):
+ return self.get_integration_target_usage(path)
+
+ return []
+
+ def get_python_module_utils_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ if not self.python_module_utils_imports:
+ display.info('Analyzing python module_utils imports...')
+ before = time.time()
+ self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
+ after = time.time()
+ display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
+
+ name = get_python_module_utils_name(path)
+
+ return sorted(self.python_module_utils_imports[name])
+
+ def get_powershell_module_utils_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ if not self.powershell_module_utils_imports:
+ display.info('Analyzing powershell module_utils imports...')
+ before = time.time()
+ self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
+ after = time.time()
+ display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
+
+ name = get_powershell_module_utils_name(path)
+
+ return sorted(self.powershell_module_utils_imports[name])
+
+ def get_csharp_module_utils_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ if not self.csharp_module_utils_imports:
+ display.info('Analyzing C# module_utils imports...')
+ before = time.time()
+ self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
+ after = time.time()
+ display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
+
+ name = get_csharp_module_utils_name(path)
+
+ return sorted(self.csharp_module_utils_imports[name])
+
+ def get_integration_target_usage(self, path):
+ """
+ :type path: str
+ :rtype: list[str]
+ """
+ target_name = path.split('/')[3]
+ dependents = [os.path.join(data_context().content.integration_targets_path, target) + os.path.sep
+ for target in sorted(self.integration_dependencies.get(target_name, set()))]
+
+ return dependents
+
+ def classify(self, path):
+ """
+ :type path: str
+ :rtype: dict[str, str] | None
+ """
+ result = self._classify(path)
+
+ # run all tests when no result given
+ if result is None:
+ return None
+
+ # run sanity on path unless result specified otherwise
+ if path in self.sanity_paths and 'sanity' not in result:
+ result['sanity'] = path
+
+ return result
+
+ def _classify(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path."""
+ if data_context().content.is_ansible:
+ return self._classify_ansible(path)
+
+ if data_context().content.collection:
+ return self._classify_collection(path)
+
+ return None
+
+ def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path using rules common to all layouts."""
+ dirname = os.path.dirname(path)
+ filename = os.path.basename(path)
+ name, ext = os.path.splitext(filename)
+
+ minimal = {}
+
+ if os.path.sep not in path:
+ if filename in (
+ 'azure-pipelines.yml',
+ 'shippable.yml',
+ ):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if is_subdir(path, '.azure-pipelines'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if is_subdir(path, '.github'):
+ return minimal
+
+ if is_subdir(path, data_context().content.integration_targets_path):
+ if not os.path.exists(path):
+ return minimal
+
+ target = self.integration_targets_by_name.get(path.split('/')[3])
+
+ if not target:
+ display.warning('Unexpected non-target found: %s' % path)
+ return minimal
+
+ if 'hidden/' in target.aliases:
+ return minimal # already expanded using get_dependent_paths
+
+ return {
+ 'integration': target.name if 'posix/' in target.aliases else None,
+ 'windows-integration': target.name if 'windows/' in target.aliases else None,
+ 'network-integration': target.name if 'network/' in target.aliases else None,
+ FOCUSED_TARGET: True,
+ }
+
+ if is_subdir(path, data_context().content.integration_path):
+ if dirname == data_context().content.integration_path:
+ for command in (
+ 'integration',
+ 'windows-integration',
+ 'network-integration',
+ ):
+ if name == command and ext == '.cfg':
+ return {
+ command: self.integration_all_target,
+ }
+
+ if name == command + '.requirements' and ext == '.txt':
+ return {
+ command: self.integration_all_target,
+ }
+
+ return {
+ 'integration': self.integration_all_target,
+ 'windows-integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ }
+
+ if is_subdir(path, data_context().content.sanity_path):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ }
+
+ if is_subdir(path, data_context().content.unit_path):
+ if path in self.units_paths:
+ return {
+ 'units': path,
+ }
+
+ # changes to files which are not unit tests should trigger tests from the nearest parent directory
+
+ test_path = os.path.dirname(path)
+
+ while test_path:
+ if test_path + '/' in self.units_paths:
+ return {
+ 'units': test_path + '/',
+ }
+
+ test_path = os.path.dirname(test_path)
+
+ if is_subdir(path, data_context().content.module_path):
+ module_name = self.module_names_by_path.get(path)
+
+ if module_name:
+ return {
+ 'units': module_name if module_name in self.units_modules else None,
+ 'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
+ 'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
+ 'network-integration': self.network_integration_by_module.get(module_name),
+ FOCUSED_TARGET: True,
+ }
+
+ return minimal
+
+ if is_subdir(path, data_context().content.module_utils_path):
+ if ext == '.cs':
+ return minimal # already expanded using get_dependent_paths
+
+ if ext == '.psm1':
+ return minimal # already expanded using get_dependent_paths
+
+ if ext == '.py':
+ return minimal # already expanded using get_dependent_paths
+
+ if is_subdir(path, data_context().content.plugin_paths['action']):
+ if ext == '.py':
+ if name.startswith('net_'):
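+                # strip the 'net_' prefix and match the platform-specific variants of this action across network targets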
+ network_target = 'network/.*_%s' % name[4:]
+
+ if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': 'all',
+ }
+
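+            # strip a '_config' or '_template' suffix to recover the network platform name,
+            # e.g. a hypothetical 'eos_config' action maps to platform 'eos'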
+ if self.prefixes.get(name) == 'network':
+ network_platform = name
+ elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
+ network_platform = name[:-7]
+ elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
+ network_platform = name[:-9]
+ else:
+ network_platform = None
+
+ if network_platform:
+ network_target = 'network/%s/' % network_platform
+
+ if network_target in self.integration_targets_by_alias:
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
+
+ return {
+ 'units': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['connection']):
+ units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'connection')
+ if name == '__init__':
+ return {
+ 'integration': self.integration_all_target,
+ 'windows-integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ 'units': units_dir,
+ }
+
+ units_path = os.path.join(units_dir, 'test_%s.py' % name)
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ integration_name = 'connection_%s' % name
+
+ if integration_name not in self.integration_targets_by_name:
+ integration_name = None
+
+ windows_integration_name = 'connection_windows_%s' % name
+
+ if windows_integration_name not in self.integration_targets_by_name:
+ windows_integration_name = None
+
+ # entire integration test commands depend on these connection plugins
+
+ if name in ['winrm', 'psrp']:
+ return {
+ 'windows-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'local':
+ return {
+ 'integration': self.integration_all_target,
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'network_cli':
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if name == 'paramiko_ssh':
+ return {
+ 'integration': integration_name,
+ 'network-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ # other connection plugins have isolated integration and unit tests
+
+ return {
+ 'integration': integration_name,
+ 'windows-integration': windows_integration_name,
+ 'units': units_path,
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['doc_fragments']):
+ return {
+ 'sanity': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['inventory']):
+ if name == '__init__':
+ return all_tests(self.args) # broad impact, run all tests
+
+ # These inventory plugins are enabled by default (see INVENTORY_ENABLED).
+ # Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
+ test_all = [
+ 'host_list',
+ 'script',
+ 'yaml',
+ 'ini',
+ 'auto',
+ ]
+
+ if name in test_all:
+ posix_integration_fallback = get_integration_all_target(self.args)
+ else:
+ posix_integration_fallback = None
+
+ target = self.integration_targets_by_name.get('inventory_%s' % name)
+ units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'inventory')
+ units_path = os.path.join(units_dir, 'test_%s.py' % name)
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return {
+ 'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
+ 'windows-integration': target.name if target and 'windows/' in target.aliases else None,
+ 'network-integration': target.name if target and 'network/' in target.aliases else None,
+ 'units': units_path,
+ FOCUSED_TARGET: target is not None,
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['filter']):
+ return self._simple_plugin_tests('filter', name)
+
+ if is_subdir(path, data_context().content.plugin_paths['lookup']):
+ return self._simple_plugin_tests('lookup', name)
+
+ if (is_subdir(path, data_context().content.plugin_paths['terminal']) or
+ is_subdir(path, data_context().content.plugin_paths['cliconf']) or
+ is_subdir(path, data_context().content.plugin_paths['netconf'])):
+ if ext == '.py':
+ if name in self.prefixes and self.prefixes[name] == 'network':
+ network_target = 'network/%s/' % name
+
+ if network_target in self.integration_targets_by_alias:
+ return {
+ 'network-integration': network_target,
+ 'units': 'all',
+ }
+
+ display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
+
+ return {
+ 'units': 'all',
+ }
+
+ return {
+ 'network-integration': self.integration_all_target,
+ 'units': 'all',
+ }
+
+ if is_subdir(path, data_context().content.plugin_paths['test']):
+ return self._simple_plugin_tests('test', name)
+
+ return None
+
+ def _classify_collection(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path using rules specific to collections."""
+ result = self._classify_common(path)
+
+ if result is not None:
+ return result
+
+ filename = os.path.basename(path)
+ dummy, ext = os.path.splitext(filename)
+
+ minimal = {}
+
+ if path.startswith('changelogs/'):
+ return minimal
+
+ if path.startswith('docs/'):
+ return minimal
+
+ if '/' not in path:
+ if path in (
+ '.gitignore',
+ 'COPYING',
+ 'LICENSE',
+ 'Makefile',
+ ):
+ return minimal
+
+ if ext in (
+ '.in',
+ '.md',
+ '.rst',
+ '.toml',
+ '.txt',
+ ):
+ return minimal
+
+ return None
+
+ def _classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
+ """Return the classification for the given path using rules specific to Ansible."""
+ if path.startswith('test/units/compat/'):
+ return {
+ 'units': 'test/units/',
+ }
+
+ result = self._classify_common(path)
+
+ if result is not None:
+ return result
+
+ dirname = os.path.dirname(path)
+ filename = os.path.basename(path)
+ name, ext = os.path.splitext(filename)
+
+ minimal = {}
+
+ if path.startswith('bin/'):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if path.startswith('changelogs/'):
+ return minimal
+
+ if path.startswith('contrib/'):
+ return {
+ 'units': 'test/units/contrib/'
+ }
+
+ if path.startswith('docs/'):
+ return minimal
+
+ if path.startswith('examples/'):
+ if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
+ return {
+ 'windows-integration': 'connection_winrm',
+ }
+
+ return minimal
+
+ if path.startswith('hacking/'):
+ return minimal
+
+ if path.startswith('lib/ansible/executor/powershell/'):
+ units_path = 'test/units/executor/powershell/'
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return {
+ 'windows-integration': self.integration_all_target,
+ 'units': units_path,
+ }
+
+ if path.startswith('lib/ansible/'):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if path.startswith('licenses/'):
+ return minimal
+
+ if path.startswith('packaging/'):
+ if path.startswith('packaging/requirements/'):
+ if name.startswith('requirements-') and ext == '.txt':
+ component = name.split('-', 1)[1]
+
+ candidates = (
+ 'cloud/%s/' % component,
+ )
+
+ for candidate in candidates:
+ if candidate in self.integration_targets_by_alias:
+ return {
+ 'integration': candidate,
+ }
+
+ return all_tests(self.args) # broad impact, run all tests
+
+ return minimal
+
+ if path.startswith('test/ansible_test/'):
+ return minimal # these tests are not invoked from ansible-test
+
+ if path.startswith('test/lib/ansible_test/config/'):
+ if name.startswith('cloud-config-'):
+ # noinspection PyTypeChecker
+ cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/completion/'):
+ if path == 'test/lib/ansible_test/_data/completion/docker.txt':
+ return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
+
+ if path.startswith('test/lib/ansible_test/_internal/cloud/'):
+ cloud_target = 'cloud/%s/' % name
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/lib/ansible_test/_internal/sanity/'):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/sanity/'):
+ return {
+ 'sanity': 'all', # test infrastructure, run all sanity checks
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_internal/units/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/units/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/pytest/'):
+ return {
+ 'units': 'all', # test infrastructure, run all unit tests
+ 'integration': 'ansible-test', # run ansible-test self tests
+ }
+
+ if path.startswith('test/lib/ansible_test/_data/requirements/'):
+ if name in (
+ 'integration',
+ 'network-integration',
+ 'windows-integration',
+ ):
+ return {
+ name: self.integration_all_target,
+ }
+
+ if name in (
+ 'sanity',
+ 'units',
+ ):
+ return {
+ name: 'all',
+ }
+
+ if name.startswith('integration.cloud.'):
+ cloud_target = 'cloud/%s/' % name.split('.')[2]
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ if path.startswith('test/lib/'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/support/'):
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/utils/shippable/'):
+ if dirname == 'test/utils/shippable':
+ test_map = {
+ 'cloud.sh': 'integration:cloud/',
+ 'linux.sh': 'integration:all',
+ 'network.sh': 'network-integration:all',
+ 'remote.sh': 'integration:all',
+ 'sanity.sh': 'sanity:all',
+ 'units.sh': 'units:all',
+ 'windows.sh': 'windows-integration:all',
+ }
+
+ test_match = test_map.get(filename)
+
+ if test_match:
+ test_command, test_target = test_match.split(':')
+
+ return {
+ test_command: test_target,
+ }
+
+ cloud_target = 'cloud/%s/' % name
+
+ if cloud_target in self.integration_targets_by_alias:
+ return {
+ 'integration': cloud_target,
+ }
+
+ return all_tests(self.args) # test infrastructure, run all tests
+
+ if path.startswith('test/utils/'):
+ return minimal
+
+ if '/' not in path:
+ if path in (
+ '.gitattributes',
+ '.gitignore',
+ '.mailmap',
+ 'COPYING',
+ 'Makefile',
+ ):
+ return minimal
+
+ if path in (
+ 'setup.py',
+ ):
+ return all_tests(self.args) # broad impact, run all tests
+
+ if ext in (
+ '.in',
+ '.md',
+ '.rst',
+ '.toml',
+ '.txt',
+ ):
+ return minimal
+
+ return None # unknown, will result in fall-back to run all tests
+
+ def _simple_plugin_tests(self, plugin_type, plugin_name): # type: (str, str) -> t.Dict[str, t.Optional[str]]
+ """
+ Return tests for the given plugin type and plugin name.
+ This function is useful for plugin types which do not require special processing.
+ """
+ if plugin_name == '__init__':
+ return all_tests(self.args, True)
+
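+        # integration targets for simple plugins follow the '{type}_{name}' convention,
+        # e.g. a hypothetical 'lookup' plugin named 'foo' maps to the 'lookup_foo' target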
+ integration_target = self.integration_targets_by_name.get('%s_%s' % (plugin_type, plugin_name))
+
+ if integration_target:
+ integration_name = integration_target.name
+ else:
+ integration_name = None
+
+ units_path = os.path.join(data_context().content.unit_path, 'plugins', plugin_type, 'test_%s.py' % plugin_name)
+
+ if units_path not in self.units_paths:
+ units_path = None
+
+ return dict(
+ integration=integration_name,
+ units=units_path,
+ )
+
+
+def all_tests(args, force=False):
+ """
+ :type args: TestConfig
+ :type force: bool
+ :rtype: dict[str, str]
+ """
+ if force:
+ integration_all_target = 'all'
+ else:
+ integration_all_target = get_integration_all_target(args)
+
+ return {
+ 'sanity': 'all',
+ 'units': 'all',
+ 'integration': integration_all_target,
+ 'windows-integration': integration_all_target,
+ 'network-integration': integration_all_target,
+ }
+
+
+def get_integration_all_target(args):
+ """
+ :type args: TestConfig
+ :rtype: str
+ """
+ if isinstance(args, IntegrationConfig):
+ return args.changed_all_target
+
+ return 'all'
diff --git a/test/lib/ansible_test/_internal/cli.py b/test/lib/ansible_test/_internal/cli.py
new file mode 100644
index 00000000..e406b2dd
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cli.py
@@ -0,0 +1,1217 @@
+"""Test runner for all Ansible tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import os
+import sys
+
+# This import should occur as early as possible.
+# It must occur before subprocess has been imported anywhere in the current process.
+from .init import (
+ CURRENT_RLIMIT_NOFILE,
+)
+
+from . import types as t
+
+from .util import (
+ ApplicationError,
+ display,
+ raw_command,
+ generate_pip_command,
+ read_lines_without_comments,
+ MAXFD,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from .delegation import (
+ check_delegation_args,
+ delegate,
+)
+
+from .executor import (
+ command_posix_integration,
+ command_network_integration,
+ command_windows_integration,
+ command_shell,
+ SUPPORTED_PYTHON_VERSIONS,
+ ApplicationWarning,
+ Delegate,
+ generate_pip_install,
+ check_startup,
+)
+
+from .config import (
+ PosixIntegrationConfig,
+ WindowsIntegrationConfig,
+ NetworkIntegrationConfig,
+ SanityConfig,
+ UnitsConfig,
+ ShellConfig,
+)
+
+from .env import (
+ EnvConfig,
+ command_env,
+ configure_timeout,
+)
+
+from .sanity import (
+ command_sanity,
+ sanity_init,
+ sanity_get_tests,
+)
+
+from .units import (
+ command_units,
+)
+
+from .target import (
+ find_target_completion,
+ walk_posix_integration_targets,
+ walk_network_integration_targets,
+ walk_windows_integration_targets,
+ walk_units_targets,
+ walk_sanity_targets,
+)
+
+from .core_ci import (
+ AWS_ENDPOINTS,
+)
+
+from .cloud import (
+ initialize_cloud_plugins,
+)
+
+from .data import (
+ data_context,
+)
+
+from .util_common import (
+ get_docker_completion,
+ get_network_completion,
+ get_remote_completion,
+ CommonConfig,
+)
+
+from .coverage.combine import (
+ command_coverage_combine,
+)
+
+from .coverage.erase import (
+ command_coverage_erase,
+)
+
+from .coverage.html import (
+ command_coverage_html,
+)
+
+from .coverage.report import (
+ command_coverage_report,
+ CoverageReportConfig,
+)
+
+from .coverage.xml import (
+ command_coverage_xml,
+)
+
+from .coverage.analyze.targets.generate import (
+ command_coverage_analyze_targets_generate,
+ CoverageAnalyzeTargetsGenerateConfig,
+)
+
+from .coverage.analyze.targets.expand import (
+ command_coverage_analyze_targets_expand,
+ CoverageAnalyzeTargetsExpandConfig,
+)
+
+from .coverage.analyze.targets.filter import (
+ command_coverage_analyze_targets_filter,
+ CoverageAnalyzeTargetsFilterConfig,
+)
+
+from .coverage.analyze.targets.combine import (
+ command_coverage_analyze_targets_combine,
+ CoverageAnalyzeTargetsCombineConfig,
+)
+
+from .coverage.analyze.targets.missing import (
+ command_coverage_analyze_targets_missing,
+ CoverageAnalyzeTargetsMissingConfig,
+)
+
+from .coverage import (
+ COVERAGE_GROUPS,
+ CoverageConfig,
+)
+
+if t.TYPE_CHECKING:
+ import argparse as argparse_module
+
+
+def main():
+ """Main program function."""
+ try:
+ os.chdir(data_context().content.root)
+ initialize_cloud_plugins()
+ sanity_init()
+ args = parse_args()
+ config = args.config(args) # type: CommonConfig
+ display.verbosity = config.verbosity
+ display.truncate = config.truncate
+ display.redact = config.redact
+ display.color = config.color
+ display.info_stderr = config.info_stderr
+ check_startup()
+ check_delegation_args(config)
+ configure_timeout(config)
+
+ display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
+ display.info('MAXFD: %d' % MAXFD, verbosity=2)
+
+ try:
+ args.func(config)
+ delegate_args = None
+ except Delegate as ex:
+ # save delegation args for use once we exit the exception handler
+ delegate_args = (ex.exclude, ex.require, ex.integration_targets)
+
+ if delegate_args:
+ # noinspection PyTypeChecker
+ delegate(config, *delegate_args)
+
+ display.review_warnings()
+ except ApplicationWarning as ex:
+ display.warning(u'%s' % ex)
+ sys.exit(0)
+ except ApplicationError as ex:
+ display.error(u'%s' % ex)
+ sys.exit(1)
+ except KeyboardInterrupt:
+ sys.exit(2)
+ except IOError as ex:
+ if ex.errno == errno.EPIPE:
+ sys.exit(3)
+ raise
+
+
+def parse_args():
+ """Parse command line arguments."""
+ try:
+ import argparse
+ except ImportError:
+ if '--requirements' not in sys.argv:
+ raise
+ # install argparse without using constraints since pip may be too old to support them
+ # not using the ansible-test requirements file since this install is for sys.executable rather than the delegated python (which may be different)
+ # argparse has no special requirements, so upgrading pip is not required here
+ raw_command(generate_pip_install(generate_pip_command(sys.executable), '', packages=['argparse'], use_constraints=False))
+ import argparse
+
+ try:
+ import argcomplete
+ except ImportError:
+ argcomplete = None
+
+ if argcomplete:
+ epilog = 'Tab completion available using the "argcomplete" python package.'
+ else:
+ epilog = 'Install the "argcomplete" python package to enable tab completion.'
+
+ def key_value_type(value): # type: (str) -> t.Tuple[str, str]
+ """Wrapper around key_value."""
+ return key_value(argparse, value)
+
+ parser = argparse.ArgumentParser(epilog=epilog)
+
+ common = argparse.ArgumentParser(add_help=False)
+
+ common.add_argument('-e', '--explain',
+ action='store_true',
+ help='explain commands that would be executed')
+
+ common.add_argument('-v', '--verbose',
+ dest='verbosity',
+ action='count',
+ default=0,
+ help='display more output')
+
+ common.add_argument('--color',
+ metavar='COLOR',
+ nargs='?',
+ help='generate color output: %(choices)s',
+ choices=('yes', 'no', 'auto'),
+ const='yes',
+ default='auto')
+
+ common.add_argument('--debug',
+ action='store_true',
+ help='run ansible commands in debug mode')
+
+ # noinspection PyTypeChecker
+ common.add_argument('--truncate',
+ dest='truncate',
+ metavar='COLUMNS',
+ type=int,
+ default=display.columns,
+ help='truncate some long output (0=disabled) (default: auto)')
+
+ common.add_argument('--redact',
+ dest='redact',
+ action='store_true',
+ default=True,
+ help='redact sensitive values in output')
+
+ common.add_argument('--no-redact',
+ dest='redact',
+ action='store_false',
+ default=False,
+ help='show sensitive values in output')
+
+ common.add_argument('--check-python',
+ choices=SUPPORTED_PYTHON_VERSIONS,
+ help=argparse.SUPPRESS)
+
+ test = argparse.ArgumentParser(add_help=False, parents=[common])
+
+ test.add_argument('include',
+ metavar='TARGET',
+ nargs='*',
+ help='test the specified target').completer = complete_target
+
+ test.add_argument('--include',
+ metavar='TARGET',
+ action='append',
+ help='include the specified target').completer = complete_target
+
+ test.add_argument('--exclude',
+ metavar='TARGET',
+ action='append',
+ help='exclude the specified target').completer = complete_target
+
+ test.add_argument('--require',
+ metavar='TARGET',
+ action='append',
+ help='require the specified target').completer = complete_target
+
+ test.add_argument('--coverage',
+ action='store_true',
+ help='analyze code coverage when running tests')
+
+ test.add_argument('--coverage-label',
+ default='',
+ help='label to include in coverage output file names')
+
+ test.add_argument('--coverage-check',
+ action='store_true',
+ help='only verify code coverage can be enabled')
+
+ test.add_argument('--metadata',
+ help=argparse.SUPPRESS)
+
+ test.add_argument('--base-branch',
+ help='base branch used for change detection')
+
+ add_changes(test, argparse)
+ add_environments(test)
+
+ integration = argparse.ArgumentParser(add_help=False, parents=[test])
+
+ integration.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ integration.add_argument('--start-at',
+ metavar='TARGET',
+ help='start at the specified target').completer = complete_target
+
+ integration.add_argument('--start-at-task',
+ metavar='TASK',
+ help='start at the specified task')
+
+ integration.add_argument('--tags',
+ metavar='TAGS',
+ help='only run plays and tasks tagged with these values')
+
+ integration.add_argument('--skip-tags',
+ metavar='TAGS',
+ help='only run plays and tasks whose tags do not match these values')
+
+ integration.add_argument('--diff',
+ action='store_true',
+ help='show diff output')
+
+ integration.add_argument('--allow-destructive',
+ action='store_true',
+ help='allow destructive tests')
+
+ integration.add_argument('--allow-root',
+ action='store_true',
+ help='allow tests requiring root when not root')
+
+ integration.add_argument('--allow-disabled',
+ action='store_true',
+ help='allow tests which have been marked as disabled')
+
+ integration.add_argument('--allow-unstable',
+ action='store_true',
+ help='allow tests which have been marked as unstable')
+
+ integration.add_argument('--allow-unstable-changed',
+ action='store_true',
+ help='allow tests which have been marked as unstable when focused changes are detected')
+
+ integration.add_argument('--allow-unsupported',
+ action='store_true',
+ help='allow tests which have been marked as unsupported')
+
+ integration.add_argument('--retry-on-error',
+ action='store_true',
+ help='retry failed test with increased verbosity')
+
+ integration.add_argument('--continue-on-error',
+ action='store_true',
+ help='continue after failed test')
+
+ integration.add_argument('--debug-strategy',
+ action='store_true',
+ help='run test playbooks using the debug strategy')
+
+ integration.add_argument('--changed-all-target',
+ metavar='TARGET',
+ default='all',
+ help='target to run when all tests are needed')
+
+ integration.add_argument('--changed-all-mode',
+ metavar='MODE',
+ choices=('default', 'include', 'exclude'),
+ help='include/exclude behavior with --changed-all-target: %(choices)s')
+
+ integration.add_argument('--list-targets',
+ action='store_true',
+ help='list matching targets instead of running tests')
+
+ integration.add_argument('--no-temp-workdir',
+ action='store_true',
+ help='do not run tests from a temporary directory (use only for verifying broken tests)')
+
+ integration.add_argument('--no-temp-unicode',
+ action='store_true',
+ help='avoid unicode characters in temporary directory (use only for verifying broken tests)')
+
+ subparsers = parser.add_subparsers(metavar='COMMAND')
+ subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ posix_integration = subparsers.add_parser('integration',
+ parents=[integration],
+ help='posix integration tests')
+
+ posix_integration.set_defaults(func=command_posix_integration,
+ targets=walk_posix_integration_targets,
+ config=PosixIntegrationConfig)
+
+ add_extra_docker_options(posix_integration)
+ add_httptester_options(posix_integration, argparse)
+
+ network_integration = subparsers.add_parser('network-integration',
+ parents=[integration],
+ help='network integration tests')
+
+ network_integration.set_defaults(func=command_network_integration,
+ targets=walk_network_integration_targets,
+ config=NetworkIntegrationConfig)
+
+ add_extra_docker_options(network_integration, integration=False)
+
+ network_integration.add_argument('--platform',
+ metavar='PLATFORM',
+ action='append',
+ help='network platform/version').completer = complete_network_platform
+
+ network_integration.add_argument('--platform-collection',
+ type=key_value_type,
+ metavar='PLATFORM=COLLECTION',
+ action='append',
+ help='collection used to test platform').completer = complete_network_platform_collection
+
+ network_integration.add_argument('--platform-connection',
+ type=key_value_type,
+ metavar='PLATFORM=CONNECTION',
+ action='append',
+ help='connection used to test platform').completer = complete_network_platform_connection
+
+ network_integration.add_argument('--inventory',
+ metavar='PATH',
+ help='path to inventory used for tests')
+
+ network_integration.add_argument('--testcase',
+ metavar='TESTCASE',
+ help='limit a test to a specified testcase').completer = complete_network_testcase
+
+ windows_integration = subparsers.add_parser('windows-integration',
+ parents=[integration],
+ help='windows integration tests')
+
+ windows_integration.set_defaults(func=command_windows_integration,
+ targets=walk_windows_integration_targets,
+ config=WindowsIntegrationConfig)
+
+ add_extra_docker_options(windows_integration, integration=False)
+ add_httptester_options(windows_integration, argparse)
+
+ windows_integration.add_argument('--windows',
+ metavar='VERSION',
+ action='append',
+ help='windows version').completer = complete_windows
+
+ windows_integration.add_argument('--inventory',
+ metavar='PATH',
+ help='path to inventory used for tests')
+
+ units = subparsers.add_parser('units',
+ parents=[test],
+ help='unit tests')
+
+ units.set_defaults(func=command_units,
+ targets=walk_units_targets,
+ config=UnitsConfig)
+
+ units.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ units.add_argument('--collect-only',
+ action='store_true',
+ help='collect tests but do not execute them')
+
+ # noinspection PyTypeChecker
+ units.add_argument('--num-workers',
+ type=int,
+ help='number of workers to use (default: auto)')
+
+ units.add_argument('--requirements-mode',
+ choices=('only', 'skip'),
+ help=argparse.SUPPRESS)
+
+ add_extra_docker_options(units, integration=False)
+
+ sanity = subparsers.add_parser('sanity',
+ parents=[test],
+ help='sanity tests')
+
+ sanity.set_defaults(func=command_sanity,
+ targets=walk_sanity_targets,
+ config=SanityConfig)
+
+ sanity.add_argument('--test',
+ metavar='TEST',
+ action='append',
+ choices=[test.name for test in sanity_get_tests()],
+ help='tests to run').completer = complete_sanity_test
+
+ sanity.add_argument('--skip-test',
+ metavar='TEST',
+ action='append',
+ choices=[test.name for test in sanity_get_tests()],
+ help='tests to skip').completer = complete_sanity_test
+
+ sanity.add_argument('--allow-disabled',
+ action='store_true',
+ help='allow tests to run which are disabled by default')
+
+ sanity.add_argument('--list-tests',
+ action='store_true',
+ help='list available tests')
+
+ sanity.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ sanity.add_argument('--enable-optional-errors',
+ action='store_true',
+ help='enable optional errors')
+
+ add_lint(sanity)
+ add_extra_docker_options(sanity, integration=False)
+
+ shell = subparsers.add_parser('shell',
+ parents=[common],
+ help='open an interactive shell')
+
+ shell.add_argument('--python',
+ metavar='VERSION',
+ choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
+ help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
+
+ shell.set_defaults(func=command_shell,
+ config=ShellConfig)
+
+ shell.add_argument('--raw',
+ action='store_true',
+ help='direct to shell with no setup')
+
+ add_environments(shell)
+ add_extra_docker_options(shell)
+ add_httptester_options(shell, argparse)
+
+ coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])
+
+ add_environments(coverage_common, isolated_delegation=False)
+
+ coverage = subparsers.add_parser('coverage',
+ help='code coverage management and reporting')
+
+ coverage_subparsers = coverage.add_subparsers(metavar='COMMAND')
+ coverage_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ add_coverage_analyze(coverage_subparsers, coverage_common)
+
+ coverage_combine = coverage_subparsers.add_parser('combine',
+ parents=[coverage_common],
+ help='combine coverage data and rewrite remote paths')
+
+ coverage_combine.set_defaults(func=command_coverage_combine,
+ config=CoverageConfig)
+
+ coverage_combine.add_argument('--export',
+ help='directory to export combined coverage files to')
+
+ add_extra_coverage_options(coverage_combine)
+
+ coverage_erase = coverage_subparsers.add_parser('erase',
+ parents=[coverage_common],
+ help='erase coverage data files')
+
+ coverage_erase.set_defaults(func=command_coverage_erase,
+ config=CoverageConfig)
+
+ coverage_report = coverage_subparsers.add_parser('report',
+ parents=[coverage_common],
+ help='generate console coverage report')
+
+ coverage_report.set_defaults(func=command_coverage_report,
+ config=CoverageReportConfig)
+
+ coverage_report.add_argument('--show-missing',
+ action='store_true',
+ help='show line numbers of statements not executed')
+ coverage_report.add_argument('--include',
+ metavar='PAT1,PAT2,...',
+ help='include only files whose paths match one of these '
+ 'patterns. Accepts shell-style wildcards, which must be '
+ 'quoted.')
+ coverage_report.add_argument('--omit',
+ metavar='PAT1,PAT2,...',
+ help='omit files whose paths match one of these patterns. '
+ 'Accepts shell-style wildcards, which must be quoted.')
+
+ add_extra_coverage_options(coverage_report)
+
+ coverage_html = coverage_subparsers.add_parser('html',
+ parents=[coverage_common],
+ help='generate html coverage report')
+
+ coverage_html.set_defaults(func=command_coverage_html,
+ config=CoverageConfig)
+
+ add_extra_coverage_options(coverage_html)
+
+ coverage_xml = coverage_subparsers.add_parser('xml',
+ parents=[coverage_common],
+ help='generate xml coverage report')
+
+ coverage_xml.set_defaults(func=command_coverage_xml,
+ config=CoverageConfig)
+
+ add_extra_coverage_options(coverage_xml)
+
+ env = subparsers.add_parser('env',
+ parents=[common],
+ help='show information about the test environment')
+
+ env.set_defaults(func=command_env,
+ config=EnvConfig)
+
+ env.add_argument('--show',
+ action='store_true',
+ help='show environment on stdout')
+
+ env.add_argument('--dump',
+ action='store_true',
+ help='dump environment to disk')
+
+ env.add_argument('--list-files',
+ action='store_true',
+ help='list files on stdout')
+
+ # noinspection PyTypeChecker
+ env.add_argument('--timeout',
+ type=int,
+ metavar='MINUTES',
+ help='timeout for future ansible-test commands (0 clears)')
+
+ if argcomplete:
+ argcomplete.autocomplete(parser, always_complete_options=False, validator=lambda i, k: True)
+
+ args = parser.parse_args()
+
+ if args.explain and not args.verbosity:
+ args.verbosity = 1
+
+ if args.color == 'yes':
+ args.color = True
+ elif args.color == 'no':
+ args.color = False
+ else:
+ args.color = sys.stdout.isatty()
+
+ return args
+
+
+def key_value(argparse, value): # type: (argparse_module, str) -> t.Tuple[str, str]
+ """Type parsing and validation for argparse key/value pairs separated by an '=' character."""
+ parts = value.split('=')
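+    # exactly one '=' is required; values which themselves contain '=' are rejected below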
+
+ if len(parts) != 2:
+ raise argparse.ArgumentTypeError('"%s" must be in the format "key=value"' % value)
+
+ return parts[0], parts[1]
+
+
+# noinspection PyProtectedMember
+def add_coverage_analyze(coverage_subparsers, coverage_common): # type: (argparse_module._SubParsersAction, argparse_module.ArgumentParser) -> None
+ """Add the `coverage analyze` subcommand."""
+ analyze = coverage_subparsers.add_parser(
+ 'analyze',
+ help='analyze collected coverage data',
+ )
+
+ analyze_subparsers = analyze.add_subparsers(metavar='COMMAND')
+ analyze_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ targets = analyze_subparsers.add_parser(
+ 'targets',
+ help='analyze integration test target coverage',
+ )
+
+ targets_subparsers = targets.add_subparsers(metavar='COMMAND')
+ targets_subparsers.required = True # work-around for python 3 bug which makes subparsers optional
+
+ targets_generate = targets_subparsers.add_parser(
+ 'generate',
+ parents=[coverage_common],
+ help='aggregate coverage by integration test target',
+ )
+
+ targets_generate.set_defaults(
+ func=command_coverage_analyze_targets_generate,
+ config=CoverageAnalyzeTargetsGenerateConfig,
+ )
+
+ targets_generate.add_argument(
+ 'input_dir',
+ nargs='?',
+ help='directory to read coverage from',
+ )
+
+ targets_generate.add_argument(
+ 'output_file',
+ help='output file for aggregated coverage',
+ )
+
+ targets_expand = targets_subparsers.add_parser(
+ 'expand',
+ parents=[coverage_common],
+ help='expand target names from integers in aggregated coverage',
+ )
+
+ targets_expand.set_defaults(
+ func=command_coverage_analyze_targets_expand,
+ config=CoverageAnalyzeTargetsExpandConfig,
+ )
+
+ targets_expand.add_argument(
+ 'input_file',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_expand.add_argument(
+ 'output_file',
+ help='output file to write expanded coverage to',
+ )
+
+ targets_filter = targets_subparsers.add_parser(
+ 'filter',
+ parents=[coverage_common],
+ help='filter aggregated coverage data',
+ )
+
+ targets_filter.set_defaults(
+ func=command_coverage_analyze_targets_filter,
+ config=CoverageAnalyzeTargetsFilterConfig,
+ )
+
+ targets_filter.add_argument(
+ 'input_file',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_filter.add_argument(
+ 'output_file',
+        help='output file to write filtered coverage to',
+ )
+
+ targets_filter.add_argument(
+ '--include-target',
+ dest='include_targets',
+ action='append',
+ help='include the specified targets',
+ )
+
+ targets_filter.add_argument(
+ '--exclude-target',
+ dest='exclude_targets',
+ action='append',
+ help='exclude the specified targets',
+ )
+
+ targets_filter.add_argument(
+ '--include-path',
+ help='include paths matching the given regex',
+ )
+
+ targets_filter.add_argument(
+ '--exclude-path',
+ help='exclude paths matching the given regex',
+ )
+
+ targets_combine = targets_subparsers.add_parser(
+ 'combine',
+ parents=[coverage_common],
+ help='combine multiple aggregated coverage files',
+ )
+
+ targets_combine.set_defaults(
+ func=command_coverage_analyze_targets_combine,
+ config=CoverageAnalyzeTargetsCombineConfig,
+ )
+
+ targets_combine.add_argument(
+ 'input_file',
+ nargs='+',
+ help='input file to read aggregated coverage from',
+ )
+
+ targets_combine.add_argument(
+ 'output_file',
+ help='output file to write aggregated coverage to',
+ )
+
+ targets_missing = targets_subparsers.add_parser(
+ 'missing',
+ parents=[coverage_common],
+ help='identify coverage in one file missing in another',
+ )
+
+ targets_missing.set_defaults(
+ func=command_coverage_analyze_targets_missing,
+ config=CoverageAnalyzeTargetsMissingConfig,
+ )
+
+ targets_missing.add_argument(
+ 'from_file',
+ help='input file containing aggregated coverage',
+ )
+
+ targets_missing.add_argument(
+ 'to_file',
+ help='input file containing aggregated coverage',
+ )
+
+ targets_missing.add_argument(
+ 'output_file',
+ help='output file to write aggregated coverage to',
+ )
+
+ targets_missing.add_argument(
+ '--only-gaps',
+ action='store_true',
+ help='report only arcs/lines not hit by any target',
+ )
+
+ targets_missing.add_argument(
+ '--only-exists',
+ action='store_true',
+ help='limit results to files that exist',
+ )
+
+
+def add_lint(parser):
+ """
+ :type parser: argparse.ArgumentParser
+ """
+ parser.add_argument('--lint',
+ action='store_true',
+                        help='write lint output to stdout, everything else to stderr')
+
+ parser.add_argument('--junit',
+ action='store_true',
+ help='write test failures to junit xml files')
+
+ parser.add_argument('--failure-ok',
+ action='store_true',
+ help='exit successfully on failed tests after saving results')
+
+
+def add_changes(parser, argparse):
+ """
+ :type parser: argparse.ArgumentParser
+ :type argparse: argparse
+ """
+ parser.add_argument('--changed', action='store_true', help='limit targets based on changes')
+
+ changes = parser.add_argument_group(title='change detection arguments')
+
+ changes.add_argument('--tracked', action='store_true', help=argparse.SUPPRESS)
+ changes.add_argument('--untracked', action='store_true', help='include untracked files')
+ changes.add_argument('--ignore-committed', dest='committed', action='store_false', help='exclude committed files')
+ changes.add_argument('--ignore-staged', dest='staged', action='store_false', help='exclude staged files')
+ changes.add_argument('--ignore-unstaged', dest='unstaged', action='store_false', help='exclude unstaged files')
+
+ changes.add_argument('--changed-from', metavar='PATH', help=argparse.SUPPRESS)
+ changes.add_argument('--changed-path', metavar='PATH', action='append', help=argparse.SUPPRESS)
+
+
+def add_environments(parser, isolated_delegation=True):
+ """
+ :type parser: argparse.ArgumentParser
+ :type isolated_delegation: bool
+ """
+ parser.add_argument('--requirements',
+ action='store_true',
+ help='install command requirements')
+
+ parser.add_argument('--python-interpreter',
+ metavar='PATH',
+ default=None,
+ help='path to the docker or remote python interpreter')
+
+ parser.add_argument('--no-pip-check',
+ dest='pip_check',
+ default=True,
+ action='store_false',
+ help='do not run "pip check" to verify requirements')
+
+ environments = parser.add_mutually_exclusive_group()
+
+ environments.add_argument('--local',
+ action='store_true',
+ help='run from the local environment')
+
+ environments.add_argument('--venv',
+ action='store_true',
+ help='run from ansible-test managed virtual environments')
+
+ venv = parser.add_argument_group(title='venv arguments')
+
+ venv.add_argument('--venv-system-site-packages',
+ action='store_true',
+ help='enable system site packages')
+
+ if not isolated_delegation:
+ environments.set_defaults(
+ docker=None,
+ remote=None,
+ remote_stage=None,
+ remote_provider=None,
+ remote_aws_region=None,
+ remote_terminate=None,
+ remote_endpoint=None,
+ python_interpreter=None,
+ )
+
+ return
+
+ environments.add_argument('--docker',
+ metavar='IMAGE',
+ nargs='?',
+ default=None,
+ const='default',
+ help='run from a docker container').completer = complete_docker
+
+ environments.add_argument('--remote',
+ metavar='PLATFORM',
+ default=None,
+ help='run from a remote instance').completer = complete_remote_shell if parser.prog.endswith(' shell') else complete_remote
+
+ remote = parser.add_argument_group(title='remote arguments')
+
+ remote.add_argument('--remote-stage',
+ metavar='STAGE',
+ help='remote stage to use: prod, dev',
+ default='prod').completer = complete_remote_stage
+
+ remote.add_argument('--remote-provider',
+ metavar='PROVIDER',
+ help='remote provider to use: %(choices)s',
+ choices=['default', 'aws', 'azure', 'parallels', 'ibmvpc', 'ibmps'],
+ default='default')
+
+ remote.add_argument('--remote-endpoint',
+ metavar='ENDPOINT',
+ help='remote provisioning endpoint to use (default: auto)',
+ default=None)
+
+ remote.add_argument('--remote-aws-region',
+ metavar='REGION',
+ help='remote aws region to use: %(choices)s (default: auto)',
+ choices=sorted(AWS_ENDPOINTS),
+ default=None)
+
+ remote.add_argument('--remote-terminate',
+ metavar='WHEN',
+ help='terminate remote instance: %(choices)s (default: %(default)s)',
+ choices=['never', 'always', 'success'],
+ default='never')
+
+
+def add_extra_coverage_options(parser):
+ """
+ :type parser: argparse.ArgumentParser
+ """
+ parser.add_argument('--group-by',
+ metavar='GROUP',
+ action='append',
+ choices=COVERAGE_GROUPS,
+ help='group output by: %s' % ', '.join(COVERAGE_GROUPS))
+
+ parser.add_argument('--all',
+ action='store_true',
+ help='include all python/powershell source files')
+
+ parser.add_argument('--stub',
+ action='store_true',
+ help='generate empty report of all python/powershell source files')
+
+
+def add_httptester_options(parser, argparse):
+ """
+ :type parser: argparse.ArgumentParser
+ :type argparse: argparse
+ """
+ group = parser.add_mutually_exclusive_group()
+
+ group.add_argument('--httptester',
+ metavar='IMAGE',
+ default='quay.io/ansible/http-test-container:1.0.0',
+ help='docker image to use for the httptester container')
+
+ group.add_argument('--disable-httptester',
+ dest='httptester',
+ action='store_const',
+ const='',
+ help='do not use the httptester container')
+
+ parser.add_argument('--inject-httptester',
+ action='store_true',
+ help=argparse.SUPPRESS) # internal use only
+
+
+def add_extra_docker_options(parser, integration=True):
+ """
+ :type parser: argparse.ArgumentParser
+ :type integration: bool
+ """
+ docker = parser.add_argument_group(title='docker arguments')
+
+ docker.add_argument('--docker-no-pull',
+ action='store_false',
+ dest='docker_pull',
+ help='do not explicitly pull the latest docker images')
+
+ if data_context().content.is_ansible:
+ docker.add_argument('--docker-keep-git',
+ action='store_true',
+ help='transfer git related files into the docker container')
+ else:
+ docker.set_defaults(
+ docker_keep_git=False,
+ )
+
+ docker.add_argument('--docker-seccomp',
+ metavar='SC',
+ choices=('default', 'unconfined'),
+ default=None,
+ help='set seccomp confinement for the test container: %(choices)s')
+
+ docker.add_argument('--docker-terminate',
+ metavar='WHEN',
+ help='terminate docker container: %(choices)s (default: %(default)s)',
+ choices=['never', 'always', 'success'],
+ default='always')
+
+ if not integration:
+ return
+
+ docker.add_argument('--docker-privileged',
+ action='store_true',
+ help='run docker container in privileged mode')
+
+ docker.add_argument('--docker-network',
+ help='run using the specified docker network')
+
+ # noinspection PyTypeChecker
+ docker.add_argument('--docker-memory',
+ help='memory limit for docker in bytes', type=int)
+
+
+# noinspection PyUnusedLocal
+def complete_remote_stage(prefix, parsed_args, **_): # pylint: disable=unused-argument
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)]
+
+
+def complete_target(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ return find_target_completion(parsed_args.targets, prefix)
+
+
+# noinspection PyUnusedLocal
+def complete_remote(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ images = sorted(get_remote_completion().keys())
+
+ return [i for i in images if i.startswith(prefix)]
+
+
+# noinspection PyUnusedLocal
+def complete_remote_shell(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ images = sorted(get_remote_completion().keys())
+
+    # 2008 doesn't support SSH, so it is not added to the list of valid images
+ windows_completion_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', 'windows.txt')
+ images.extend(["windows/%s" % i for i in read_lines_without_comments(windows_completion_path, remove_blank_lines=True) if i != '2008'])
+
+ return [i for i in images if i.startswith(prefix)]
+
+
+# noinspection PyUnusedLocal
+def complete_docker(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ images = sorted(get_docker_completion().keys())
+
+ return [i for i in images if i.startswith(prefix)]
+
+
+def complete_windows(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ images = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', 'windows.txt'), remove_blank_lines=True)
+
+ return [i for i in images if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
+
+
+def complete_network_platform(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ images = sorted(get_network_completion())
+
+ return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
+
+
+def complete_network_platform_collection(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ left = prefix.split('=')[0]
+ images = sorted(set(image.split('/')[0] for image in get_network_completion()))
+
+ return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])]
+
+
+def complete_network_platform_connection(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ left = prefix.split('=')[0]
+ images = sorted(set(image.split('/')[0] for image in get_network_completion()))
+
+ return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_connection or i not in [x[0] for x in parsed_args.platform_connection])]
+
+
+def complete_network_testcase(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ testcases = []
+
+    # since testcases are module specific, don't autocomplete if more than one
+    # module is specified
+ if len(parsed_args.include) != 1:
+ return []
+
+ test_dir = os.path.join(data_context().content.integration_targets_path, parsed_args.include[0], 'tests')
+ connection_dirs = data_context().content.get_dirs(test_dir)
+
+ for connection_dir in connection_dirs:
+ for testcase in [os.path.basename(path) for path in data_context().content.get_files(connection_dir)]:
+ if testcase.startswith(prefix):
+ testcases.append(testcase.split('.')[0])
+
+ return testcases
+
+
+# noinspection PyUnusedLocal
+def complete_sanity_test(prefix, parsed_args, **_):
+ """
+ :type prefix: unicode
+ :type parsed_args: any
+ :rtype: list[str]
+ """
+ del parsed_args
+
+ tests = sorted(test.name for test in sanity_get_tests())
+
+ return [i for i in tests if i.startswith(prefix)]
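
Each subcommand above registers its handler and config class through set_defaults(func=..., config=...), so a single entry point can dispatch every command without a chain of conditionals. A minimal, self-contained sketch of the same pattern, with all names invented for illustration rather than taken from ansible-test:

    import argparse

    class GenerateConfig:
        """Illustrative stand-in for a per-command config class."""
        def __init__(self, args):
            self.input_dir = args.input_dir

    def command_generate(config):
        print('generating from %s' % config.input_dir)

    parser = argparse.ArgumentParser(prog='coverage-analyze')
    subparsers = parser.add_subparsers(metavar='COMMAND')
    subparsers.required = True  # same Python 3 work-around used above

    generate = subparsers.add_parser('generate')
    generate.set_defaults(func=command_generate, config=GenerateConfig)
    generate.add_argument('input_dir', nargs='?', default='.')

    args = parser.parse_args(['generate', 'coverage/'])
    args.func(args.config(args))  # build the config object, then call the handler
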
diff --git a/test/lib/ansible_test/_internal/cloud/__init__.py b/test/lib/ansible_test/_internal/cloud/__init__.py
new file mode 100644
index 00000000..04f592c4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/__init__.py
@@ -0,0 +1,429 @@
+"""Plugin system for cloud providers and environments for use in integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import atexit
+import datetime
+import time
+import os
+import re
+import tempfile
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ import_plugins,
+ load_plugins,
+ ABC,
+ ANSIBLE_TEST_CONFIG_ROOT,
+)
+
+from ..util_common import (
+ write_json_test_results,
+ ResultType,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..config import (
+ IntegrationConfig,
+)
+
+from ..ci import (
+ get_ci_provider,
+)
+
+from ..data import (
+ data_context,
+)
+
+PROVIDERS = {}
+ENVIRONMENTS = {}
+
+
+def initialize_cloud_plugins():
+ """Import cloud plugins and load them into the plugin dictionaries."""
+ import_plugins('cloud')
+
+ load_plugins(CloudProvider, PROVIDERS)
+ load_plugins(CloudEnvironment, ENVIRONMENTS)
+
+
+def get_cloud_platforms(args, targets=None):
+ """
+ :type args: TestConfig
+ :type targets: tuple[IntegrationTarget] | None
+ :rtype: list[str]
+ """
+ if isinstance(args, IntegrationConfig):
+ if args.list_targets:
+ return []
+
+ if targets is None:
+ cloud_platforms = set(args.metadata.cloud_config or [])
+ else:
+ cloud_platforms = set(get_cloud_platform(target) for target in targets)
+
+ cloud_platforms.discard(None)
+
+ return sorted(cloud_platforms)
+
+
+def get_cloud_platform(target):
+ """
+ :type target: IntegrationTarget
+ :rtype: str | None
+ """
+ cloud_platforms = set(a.split('/')[1] for a in target.aliases if a.startswith('cloud/') and a.endswith('/') and a != 'cloud/')
+
+ if not cloud_platforms:
+ return None
+
+ if len(cloud_platforms) == 1:
+ cloud_platform = cloud_platforms.pop()
+
+ if cloud_platform not in PROVIDERS:
+            raise ApplicationError('Target %s aliases contain an unknown cloud platform: %s' % (target.name, cloud_platform))
+
+ return cloud_platform
+
+    raise ApplicationError('Target %s aliases contain multiple cloud platforms: %s' % (target.name, ', '.join(sorted(cloud_platforms))))
+
+
+def get_cloud_providers(args, targets=None):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget] | None
+ :rtype: list[CloudProvider]
+ """
+ return [PROVIDERS[p](args) for p in get_cloud_platforms(args, targets)]
+
+
+def get_cloud_environment(args, target):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :rtype: CloudEnvironment
+ """
+ cloud_platform = get_cloud_platform(target)
+
+ if not cloud_platform:
+ return None
+
+ return ENVIRONMENTS[cloud_platform](args)
+
+
+def cloud_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :return: list[str]
+ """
+ if args.metadata.cloud_config is not None:
+ return [] # cloud filter already performed prior to delegation
+
+ exclude = []
+
+ for provider in get_cloud_providers(args, targets):
+ provider.filter(targets, exclude)
+
+ return exclude
+
+
+def cloud_init(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ """
+ if args.metadata.cloud_config is not None:
+ return # cloud configuration already established prior to delegation
+
+ args.metadata.cloud_config = {}
+
+ results = {}
+
+ for provider in get_cloud_providers(args, targets):
+ args.metadata.cloud_config[provider.platform] = {}
+
+ start_time = time.time()
+ provider.setup()
+ end_time = time.time()
+
+ results[provider.platform] = dict(
+ platform=provider.platform,
+ setup_seconds=int(end_time - start_time),
+ targets=[target.name for target in targets],
+ )
+
+ if not args.explain and results:
+ result_name = '%s-%s.json' % (
+ args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))
+
+ data = dict(
+ clouds=results,
+ )
+
+ write_json_test_results(ResultType.DATA, result_name, data)
+
+
+class CloudBase(ABC):
+ """Base class for cloud plugins."""
+ __metaclass__ = abc.ABCMeta
+
+ _CONFIG_PATH = 'config_path'
+ _RESOURCE_PREFIX = 'resource_prefix'
+ _MANAGED = 'managed'
+ _SETUP_EXECUTED = 'setup_executed'
+
+ def __init__(self, args):
+ """
+ :type args: IntegrationConfig
+ """
+ self.args = args
+ self.platform = self.__module__.split('.')[-1]
+
+ def config_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """Add the config file to the payload file list."""
+ if self._get_cloud_config(self._CONFIG_PATH, ''):
+ pair = (self.config_path, os.path.relpath(self.config_path, data_context().content.root))
+
+ if pair not in files:
+ display.info('Including %s config: %s -> %s' % (self.platform, pair[0], pair[1]), verbosity=3)
+ files.append(pair)
+
+ data_context().register_payload_callback(config_callback)
+
+ @property
+ def setup_executed(self):
+ """
+ :rtype: bool
+ """
+ return self._get_cloud_config(self._SETUP_EXECUTED, False)
+
+ @setup_executed.setter
+ def setup_executed(self, value):
+ """
+ :type value: bool
+ """
+ self._set_cloud_config(self._SETUP_EXECUTED, value)
+
+ @property
+ def config_path(self):
+ """
+ :rtype: str
+ """
+ return os.path.join(data_context().content.root, self._get_cloud_config(self._CONFIG_PATH))
+
+ @config_path.setter
+ def config_path(self, value):
+ """
+ :type value: str
+ """
+ self._set_cloud_config(self._CONFIG_PATH, value)
+
+ @property
+ def resource_prefix(self):
+ """
+ :rtype: str
+ """
+ return self._get_cloud_config(self._RESOURCE_PREFIX)
+
+ @resource_prefix.setter
+ def resource_prefix(self, value):
+ """
+ :type value: str
+ """
+ self._set_cloud_config(self._RESOURCE_PREFIX, value)
+
+ @property
+ def managed(self):
+ """
+ :rtype: bool
+ """
+ return self._get_cloud_config(self._MANAGED)
+
+ @managed.setter
+ def managed(self, value):
+ """
+ :type value: bool
+ """
+ self._set_cloud_config(self._MANAGED, value)
+
+ def _get_cloud_config(self, key, default=None):
+ """
+ :type key: str
+ :type default: str | int | bool | None
+ :rtype: str | int | bool
+ """
+ if default is not None:
+ return self.args.metadata.cloud_config[self.platform].get(key, default)
+
+ return self.args.metadata.cloud_config[self.platform][key]
+
+ def _set_cloud_config(self, key, value):
+ """
+ :type key: str
+ :type value: str | int | bool
+ """
+ self.args.metadata.cloud_config[self.platform][key] = value
+
+
+class CloudProvider(CloudBase):
+ """Base class for cloud provider plugins. Sets up cloud resources before delegation."""
+ def __init__(self, args, config_extension='.ini'):
+ """
+ :type args: IntegrationConfig
+ :type config_extension: str
+ """
+ super(CloudProvider, self).__init__(args)
+
+ self.ci_provider = get_ci_provider()
+ self.remove_config = False
+ self.config_static_name = 'cloud-config-%s%s' % (self.platform, config_extension)
+ self.config_static_path = os.path.join(data_context().content.integration_path, self.config_static_name)
+ self.config_template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, '%s.template' % self.config_static_name)
+ self.config_extension = config_extension
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+
+ def setup(self):
+        """Set up the cloud resource before delegation and register a cleanup callback."""
+ self.resource_prefix = self.ci_provider.generate_resource_prefix()
+
+ atexit.register(self.cleanup)
+
+ def get_remote_ssh_options(self):
+ """Get any additional options needed when delegating tests to a remote instance via SSH.
+ :rtype: list[str]
+ """
+ return []
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.remove_config:
+ os.remove(self.config_path)
+
+ def _use_static_config(self):
+ """
+ :rtype: bool
+ """
+ if os.path.isfile(self.config_static_path):
+ display.info('Using existing %s cloud config: %s' % (self.platform, self.config_static_path), verbosity=1)
+ self.config_path = self.config_static_path
+ static = True
+ else:
+ static = False
+
+ self.managed = not static
+
+ return static
+
+ def _write_config(self, content):
+ """
+ :type content: str
+ """
+ prefix = '%s-' % os.path.splitext(os.path.basename(self.config_static_path))[0]
+
+ with tempfile.NamedTemporaryFile(dir=data_context().content.integration_path, prefix=prefix, suffix=self.config_extension, delete=False) as config_fd:
+ filename = os.path.join(data_context().content.integration_path, os.path.basename(config_fd.name))
+
+ self.config_path = filename
+ self.remove_config = True
+
+ display.info('>>> Config: %s\n%s' % (filename, content.strip()), verbosity=3)
+
+ config_fd.write(to_bytes(content))
+ config_fd.flush()
+
+ def _read_config_template(self):
+ """
+ :rtype: str
+ """
+ lines = read_text_file(self.config_template_path).splitlines()
+ lines = [line for line in lines if not line.startswith('#')]
+ config = '\n'.join(lines).strip() + '\n'
+ return config
+
+ @staticmethod
+ def _populate_config_template(template, values):
+ """
+ :type template: str
+ :type values: dict[str, str]
+ :rtype: str
+ """
+ for key in sorted(values):
+ value = values[key]
+ template = template.replace('@%s' % key, value)
+
+ return template
+
+
+class CloudEnvironment(CloudBase):
+ """Base class for cloud environment plugins. Updates integration test environment after delegation."""
+ def setup_once(self):
+ """Run setup if it has not already been run."""
+ if self.setup_executed:
+ return
+
+ self.setup()
+ self.setup_executed = True
+
+ def setup(self):
+ """Setup which should be done once per environment instead of once per test target."""
+
+ @abc.abstractmethod
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+
+ def on_failure(self, target, tries):
+ """
+ :type target: IntegrationTarget
+ :type tries: int
+ """
+
+
+class CloudEnvironmentConfig:
+ """Configuration for the environment."""
+ def __init__(self, env_vars=None, ansible_vars=None, module_defaults=None, callback_plugins=None):
+ """
+ :type env_vars: dict[str, str] | None
+ :type ansible_vars: dict[str, any] | None
+ :type module_defaults: dict[str, dict[str, any]] | None
+ :type callback_plugins: list[str] | None
+ """
+ self.env_vars = env_vars
+ self.ansible_vars = ansible_vars
+ self.module_defaults = module_defaults
+ self.callback_plugins = callback_plugins
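
The @KEY substitution performed by _populate_config_template is plain string replacement in sorted key order. A minimal sketch of that behaviour, with the template contents assumed for illustration:

    def populate_config_template(template, values):
        """Replace each @KEY token with its value, mirroring the static method above."""
        for key in sorted(values):
            template = template.replace('@%s' % key, values[key])
        return template

    template = '[default]\naws_access_key: @ACCESS_KEY\naws_secret_key: @SECRET_KEY\n'
    print(populate_config_template(template, dict(ACCESS_KEY='AKIA...', SECRET_KEY='abc123')))

Because replacement runs in sorted order, a key that is a strict prefix of another (say @KEY and @KEY_ID) would corrupt the longer token; the templates shipped with ansible-test evidently avoid such collisions.
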
diff --git a/test/lib/ansible_test/_internal/cloud/acme.py b/test/lib/ansible_test/_internal/cloud/acme.py
new file mode 100644
index 00000000..3d0ace24
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/acme.py
@@ -0,0 +1,193 @@
+"""ACME plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+ ApplicationError,
+ SubprocessError,
+)
+
+from ..http import (
+ HttpClient,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class ACMEProvider(CloudProvider):
+ """ACME plugin. Sets up cloud resources for tests."""
+ DOCKER_SIMULATOR_NAME = 'acme-simulator'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(ACMEProvider, self).__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ if os.environ.get('ANSIBLE_ACME_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_ACME_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/acme-test-container:2.0.0'
+ self.container_name = ''
+
+ def _wait_for_service(self, protocol, acme_host, port, local_part, name):
+ """Wait for an endpoint to accept connections."""
+ if self.args.explain:
+ return
+
+ client = HttpClient(self.args, always=True, insecure=True)
+ endpoint = '%s://%s:%d/%s' % (protocol, acme_host, port, local_part)
+
+ for dummy in range(1, 30):
+ display.info('Waiting for %s: %s' % (name, endpoint), verbosity=1)
+
+ try:
+ client.get(endpoint)
+ return
+ except SubprocessError:
+ pass
+
+ time.sleep(1)
+
+ raise ApplicationError('Timeout waiting for %s.' % name)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ def setup(self):
+        """Set up the cloud resource before delegation and register a cleanup callback."""
+ super(ACMEProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(ACMEProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+        """Create an ACME test container using docker."""
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing ACME docker test container.', verbosity=1)
+ else:
+ display.info('Starting a new ACME docker test container.', verbosity=1)
+
+ if not container_id:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', '5000:5000', # control port for flask app in container
+ '-p', '14000:14000', # Pebble ACME CA
+ ]
+ else:
+ publish_ports = []
+
+ if not os.environ.get('ANSIBLE_ACME_CONTAINER'):
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ acme_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ acme_host = self._get_simulator_address()
+ display.info('Found ACME test container address: %s' % acme_host, verbosity=1)
+ else:
+ acme_host = get_docker_hostname()
+
+ if container_id:
+ acme_host_ip = self._get_simulator_address()
+ else:
+ acme_host_ip = get_docker_hostname()
+
+ self._set_cloud_config('acme_host', acme_host)
+
+ self._wait_for_service('http', acme_host_ip, 5000, '', 'ACME controller')
+ self._wait_for_service('https', acme_host_ip, 14000, 'dir', 'ACME CA endpoint')
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ raise NotImplementedError()
+
+
+class ACMEEnvironment(CloudEnvironment):
+ """ACME environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ ansible_vars = dict(
+ acme_host=self._get_cloud_config('acme_host'),
+ )
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
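
The provider and environment halves of each plugin communicate only through args.metadata.cloud_config: _set_cloud_config('acme_host', ...) runs before delegation, and _get_cloud_config('acme_host') reads the value back afterwards, once the metadata has crossed the delegation boundary. A stripped-down sketch of that round trip, with the metadata object simplified to a bare class:

    class Metadata:
        """Simplified stand-in for the ansible-test metadata object."""
        def __init__(self):
            self.cloud_config = {'acme': {}}

    meta = Metadata()

    # provider side, before delegation
    meta.cloud_config['acme']['acme_host'] = 'acme-simulator'

    # environment side, after delegation (the real metadata is serialized in between)
    ansible_vars = dict(acme_host=meta.cloud_config['acme']['acme_host'])
    print(ansible_vars)
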
diff --git a/test/lib/ansible_test/_internal/cloud/aws.py b/test/lib/ansible_test/_internal/cloud/aws.py
new file mode 100644
index 00000000..190ef488
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/aws.py
@@ -0,0 +1,124 @@
+"""AWS plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util import (
+ ApplicationError,
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class AwsCloudProvider(CloudProvider):
+ """AWS cloud provider plugin. Sets up cloud resources before delegation."""
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ super(AwsCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+        """Set up the cloud resource before delegation and register a cleanup callback."""
+ super(AwsCloudProvider, self).setup()
+
+ aws_config_path = os.path.expanduser('~/.aws')
+
+ if os.path.exists(aws_config_path) and not self.args.docker and not self.args.remote:
+ raise ApplicationError('Rename "%s" or use the --docker or --remote option to isolate tests.' % aws_config_path)
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self):
+ """Request AWS credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+
+ aci = self._create_ansible_core_ci()
+
+ response = aci.start()
+
+ if not self.args.explain:
+ credentials = response['aws']['credentials']
+
+ values = dict(
+ ACCESS_KEY=credentials['access_key'],
+ SECRET_KEY=credentials['secret_key'],
+ SECURITY_TOKEN=credentials['session_token'],
+ REGION='us-east-1',
+ )
+
+ display.sensitive.add(values['SECRET_KEY'])
+ display.sensitive.add(values['SECURITY_TOKEN'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self):
+ """
+ :rtype: AnsibleCoreCI
+ """
+ return AnsibleCoreCI(self.args, 'aws', 'sts', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
+
+
+class AwsCloudEnvironment(CloudEnvironment):
+ """AWS cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ display.sensitive.add(ansible_vars.get('aws_secret_key'))
+ display.sensitive.add(ansible_vars.get('security_token'))
+
+ if 'aws_cleanup' not in ansible_vars:
+ ansible_vars['aws_cleanup'] = not self.managed
+
+ env_vars = {'ANSIBLE_DEBUG_BOTOCORE_LOGS': 'True'}
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ callback_plugins=['aws_resource_actions'],
+ )
+
+ def on_failure(self, target, tries):
+ """
+ :type target: TestTarget
+ :type tries: int
+ """
+ if not tries and self.managed:
+ display.notice('If %s failed due to permissions, the IAM test policy may need to be updated. '
+ 'https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html#aws-permissions-for-integration-tests.'
+ % target.name)
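
AwsCloudEnvironment turns the [default] section of the written config file directly into Ansible variables. A minimal sketch of that usage; it reads from a string and uses the Python 3 stdlib configparser, while the plugin reads a file through ansible-test's own ConfigParser compatibility wrapper, and the contents shown are assumed:

    import configparser

    config_text = '[default]\naws_access_key: AKIA...\naws_region: us-east-1\n'

    parser = configparser.ConfigParser()
    parser.read_string(config_text)

    ansible_vars = dict(resource_prefix='test-prefix')
    ansible_vars.update(dict(parser.items('default')))
    print(ansible_vars)
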
diff --git a/test/lib/ansible_test/_internal/cloud/azure.py b/test/lib/ansible_test/_internal/cloud/azure.py
new file mode 100644
index 00000000..02465eed
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/azure.py
@@ -0,0 +1,213 @@
+"""Azure plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..http import (
+ HttpClient,
+ urlparse,
+ urlunparse,
+ parse_qs,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class AzureCloudProvider(CloudProvider):
+ """Azure cloud provider plugin. Sets up cloud resources before delegation."""
+ SHERLOCK_CONFIG_PATH = os.path.expanduser('~/.ansible-sherlock-ci.cfg')
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(AzureCloudProvider, self).__init__(args)
+
+ self.aci = None
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ if os.path.isfile(self.SHERLOCK_CONFIG_PATH):
+ return
+
+ super(AzureCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+        """Set up the cloud resource before delegation and register a cleanup callback."""
+ super(AzureCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ get_config(self.config_path) # check required variables
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.aci:
+ self.aci.stop()
+
+ super(AzureCloudProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Request Azure credentials through Sherlock."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+ response = {}
+
+ if os.path.isfile(self.SHERLOCK_CONFIG_PATH):
+ sherlock_uri = read_text_file(self.SHERLOCK_CONFIG_PATH).splitlines()[0].strip() + '&rgcount=2'
+
+ parts = urlparse(sherlock_uri)
+ query_string = parse_qs(parts.query)
+ base_uri = urlunparse(parts[:4] + ('', ''))
+
+ if 'code' not in query_string:
+ example_uri = 'https://example.azurewebsites.net/api/sandbox-provisioning'
+ raise ApplicationError('The Sherlock URI must include the API key in the query string. Example: %s?code=xxx' % example_uri)
+
+ display.info('Initializing azure/sherlock from: %s' % base_uri, verbosity=1)
+
+ http = HttpClient(self.args)
+ result = http.get(sherlock_uri)
+
+ display.info('Started azure/sherlock from: %s' % base_uri, verbosity=1)
+
+ if not self.args.explain:
+ response = result.json()
+ else:
+ aci = self._create_ansible_core_ci()
+
+ aci_result = aci.start()
+
+ if not self.args.explain:
+ response = aci_result['azure']
+ self.aci = aci
+
+ if not self.args.explain:
+ values = dict(
+ AZURE_CLIENT_ID=response['clientId'],
+ AZURE_SECRET=response['clientSecret'],
+ AZURE_SUBSCRIPTION_ID=response['subscriptionId'],
+ AZURE_TENANT=response['tenantId'],
+ RESOURCE_GROUP=response['resourceGroupNames'][0],
+ RESOURCE_GROUP_SECONDARY=response['resourceGroupNames'][1],
+ )
+
+ display.sensitive.add(values['AZURE_SECRET'])
+
+ config = '\n'.join('%s: %s' % (key, values[key]) for key in sorted(values))
+
+ config = '[default]\n' + config
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self):
+ """
+ :rtype: AnsibleCoreCI
+ """
+ return AnsibleCoreCI(self.args, 'azure', 'azure', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
+
+
+class AzureCloudEnvironment(CloudEnvironment):
+ """Azure cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ env_vars = get_config(self.config_path)
+
+ display.sensitive.add(env_vars.get('AZURE_SECRET'))
+ display.sensitive.add(env_vars.get('AZURE_PASSWORD'))
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
+
+ def on_failure(self, target, tries):
+ """
+ :type target: TestTarget
+ :type tries: int
+ """
+ if not tries and self.managed:
+ display.notice('If %s failed due to permissions, the test policy may need to be updated. '
+ 'For help, consult @mattclay or @gundalow on GitHub or #ansible-devel on IRC.' % target.name)
+
+
+def get_config(config_path):
+ """
+ :type config_path: str
+ :rtype: dict[str, str]
+ """
+ parser = ConfigParser()
+ parser.read(config_path)
+
+ config = dict((key.upper(), value) for key, value in parser.items('default'))
+
+ rg_vars = (
+ 'RESOURCE_GROUP',
+ 'RESOURCE_GROUP_SECONDARY',
+ )
+
+ sp_vars = (
+ 'AZURE_CLIENT_ID',
+ 'AZURE_SECRET',
+ 'AZURE_SUBSCRIPTION_ID',
+ 'AZURE_TENANT',
+ )
+
+ ad_vars = (
+ 'AZURE_AD_USER',
+ 'AZURE_PASSWORD',
+ 'AZURE_SUBSCRIPTION_ID',
+ )
+
+ rg_ok = all(var in config for var in rg_vars)
+ sp_ok = all(var in config for var in sp_vars)
+ ad_ok = all(var in config for var in ad_vars)
+
+ if not rg_ok:
+ raise ApplicationError('Resource groups must be defined with: %s' % ', '.join(sorted(rg_vars)))
+
+ if not sp_ok and not ad_ok:
+ raise ApplicationError('Credentials must be defined using either:\nService Principal: %s\nActive Directory: %s' % (
+ ', '.join(sorted(sp_vars)), ', '.join(sorted(ad_vars))))
+
+ return config
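
get_config accepts either a service-principal credential set or an Active Directory one, in both cases alongside the two resource group names. A quick check of that acceptance logic against a minimal config, with placeholder values:

    config = {
        'RESOURCE_GROUP': 'rg1',
        'RESOURCE_GROUP_SECONDARY': 'rg2',
        'AZURE_CLIENT_ID': 'xxx',
        'AZURE_SECRET': 'xxx',
        'AZURE_SUBSCRIPTION_ID': 'xxx',
        'AZURE_TENANT': 'xxx',
    }

    sp_vars = ('AZURE_CLIENT_ID', 'AZURE_SECRET', 'AZURE_SUBSCRIPTION_ID', 'AZURE_TENANT')
    ad_vars = ('AZURE_AD_USER', 'AZURE_PASSWORD', 'AZURE_SUBSCRIPTION_ID')

    # service principal present, AD absent: still accepted, since either set suffices
    print(all(var in config for var in sp_vars))  # True
    print(all(var in config for var in ad_vars))  # False
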
diff --git a/test/lib/ansible_test/_internal/cloud/cloudscale.py b/test/lib/ansible_test/_internal/cloud/cloudscale.py
new file mode 100644
index 00000000..8e5885b2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/cloudscale.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+#
+# (c) 2018, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+"""Cloudscale plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import ConfigParser, display
+
+
+class CloudscaleCloudProvider(CloudProvider):
+ """Cloudscale cloud provider plugin. Sets up cloud resources before
+ delegation.
+ """
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(CloudscaleCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(CloudscaleCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+        """Set up the cloud resource before delegation and register a cleanup callback."""
+ super(CloudscaleCloudProvider, self).setup()
+
+ if os.path.isfile(self.config_static_path):
+ display.info('Using existing %s cloud config: %s'
+ % (self.platform, self.config_static_path),
+ verbosity=1)
+ self.config_path = self.config_static_path
+ self.managed = False
+
+
+class CloudscaleCloudEnvironment(CloudEnvironment):
+ """Cloudscale cloud environment plugin. Updates integration test environment
+ after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ CLOUDSCALE_API_TOKEN=parser.get('default', 'cloudscale_api_token'),
+ )
+
+ display.sensitive.add(env_vars['CLOUDSCALE_API_TOKEN'])
+
+ ansible_vars = dict(
+ cloudscale_resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
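
The environment above exposes its secrets to Ansible by lower-casing the environment variable names, so CLOUDSCALE_API_TOKEN becomes cloudscale_api_token alongside the resource prefix. The mapping in isolation, with a placeholder token:

    env_vars = dict(CLOUDSCALE_API_TOKEN='placeholder-token')
    ansible_vars = dict(cloudscale_resource_prefix='prefix-123')
    ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
    print(ansible_vars)  # contains both the prefix and the lower-cased token
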
diff --git a/test/lib/ansible_test/_internal/cloud/cs.py b/test/lib/ansible_test/_internal/cloud/cs.py
new file mode 100644
index 00000000..d028d9c4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/cs.py
@@ -0,0 +1,300 @@
+"""CloudStack plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import time
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ ApplicationError,
+ display,
+ SubprocessError,
+ ConfigParser,
+)
+
+from ..http import (
+ HttpClient,
+ HttpError,
+ urlparse,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ docker_network_inspect,
+ docker_exec,
+ get_docker_container_id,
+ get_docker_preferred_network_name,
+ get_docker_hostname,
+ is_docker_user_defined_network,
+)
+
+
+class CsCloudProvider(CloudProvider):
+ """CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
+ DOCKER_SIMULATOR_NAME = 'cloudstack-sim'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(CsCloudProvider, self).__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ self.image = 'quay.io/ansible/cloudstack-test-container:1.2.0'
+ self.container_name = ''
+ self.endpoint = ''
+ self.host = ''
+ self.port = 0
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+
+ def setup(self):
+        """Set up the cloud resource before delegation and register a cleanup callback."""
+ super(CsCloudProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_remote_ssh_options(self):
+ """Get any additional options needed when delegating tests to a remote instance via SSH.
+ :rtype: list[str]
+ """
+ if self.managed:
+ return ['-R', '8888:%s:8888' % get_docker_hostname()]
+
+ return []
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ if self.ci_provider.code:
+ docker_rm(self.args, self.container_name)
+ elif not self.args.explain:
+ display.notice('Remember to run `docker rm -f %s` when finished testing.' % self.container_name)
+
+ super(CsCloudProvider, self).cleanup()
+
+ def _setup_static(self):
+ """Configure CloudStack tests for use with static configuration."""
+ parser = ConfigParser()
+ parser.read(self.config_static_path)
+
+ self.endpoint = parser.get('cloudstack', 'endpoint')
+
+ parts = urlparse(self.endpoint)
+
+ self.host = parts.hostname
+
+ if not self.host:
+ raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint)
+
+ if parts.port:
+ self.port = parts.port
+ elif parts.scheme == 'http':
+ self.port = 80
+ elif parts.scheme == 'https':
+ self.port = 443
+ else:
+ raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint)
+
+ display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)
+
+ self._wait_for_service()
+
+ def _setup_dynamic(self):
+ """Create a CloudStack simulator using docker."""
+ config = self._read_config_template()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0]['State']['Running']:
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
+ else:
+ display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
+ docker_pull(self.args, self.image)
+ docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])
+
+ # apply work-around for OverlayFS issue
+ # https://github.com/docker/for-linux/issues/72#issuecomment-319904698
+ docker_exec(self.args, self.container_name, ['find', '/var/lib/mysql', '-type', 'f', '-exec', 'touch', '{}', ';'])
+
+ if not self.args.explain:
+            display.notice('The CloudStack simulator will probably be ready in 2-4 minutes.')
+
+ container_id = get_docker_container_id()
+
+ if container_id:
+ self.host = self._get_simulator_address()
+ display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
+ else:
+ self.host = get_docker_hostname()
+
+ self.port = 8888
+ self.endpoint = 'http://%s:%d' % (self.host, self.port)
+
+ self._wait_for_service()
+
+ if self.args.explain:
+ values = dict(
+ HOST=self.host,
+ PORT=str(self.port),
+ )
+ else:
+ credentials = self._get_credentials()
+
+ if self.args.docker:
+ host = self.DOCKER_SIMULATOR_NAME
+ elif self.args.remote:
+ host = 'localhost'
+ else:
+ host = self.host
+
+ values = dict(
+ HOST=host,
+ PORT=str(self.port),
+ KEY=credentials['apikey'],
+ SECRET=credentials['secretkey'],
+ )
+
+ display.sensitive.add(values['SECRET'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _get_simulator_address(self):
+ current_network = get_docker_preferred_network_name(self.args)
+ networks = docker_network_inspect(self.args, current_network)
+
+ try:
+ network = [network for network in networks if network['Name'] == current_network][0]
+ containers = network['Containers']
+ container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_SIMULATOR_NAME][0]
+ return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
+ except Exception:
+ display.error('Failed to process the following docker network inspect output:\n%s' %
+ json.dumps(networks, indent=4, sort_keys=True))
+ raise
+
+ def _wait_for_service(self):
+ """Wait for the CloudStack service endpoint to accept connections."""
+ if self.args.explain:
+ return
+
+ client = HttpClient(self.args, always=True)
+ endpoint = self.endpoint
+
+ for _iteration in range(1, 30):
+ display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1)
+
+ try:
+ client.get(endpoint)
+ return
+ except SubprocessError:
+ pass
+
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for CloudStack service.')
+
+ def _get_credentials(self):
+ """Wait for the CloudStack simulator to return credentials.
+ :rtype: dict[str, str]
+ """
+ client = HttpClient(self.args, always=True)
+ endpoint = '%s/admin.json' % self.endpoint
+
+ for _iteration in range(1, 30):
+ display.info('Waiting for CloudStack credentials: %s' % endpoint, verbosity=1)
+
+ response = client.get(endpoint)
+
+ if response.status_code == 200:
+ try:
+ return response.json()
+ except HttpError as ex:
+ display.error(ex)
+
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for CloudStack credentials.')
+
+
+class CsCloudEnvironment(CloudEnvironment):
+ """CloudStack cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ config = dict(parser.items('default'))
+
+ env_vars = dict(
+ CLOUDSTACK_ENDPOINT=config['endpoint'],
+ CLOUDSTACK_KEY=config['key'],
+ CLOUDSTACK_SECRET=config['secret'],
+ CLOUDSTACK_TIMEOUT=config['timeout'],
+ )
+
+ display.sensitive.add(env_vars['CLOUDSTACK_SECRET'])
+
+ ansible_vars = dict(
+ cs_resource_prefix=self.resource_prefix,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
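
_setup_static derives the host and port from the configured endpoint URL, falling back to the scheme's default port when the URL does not name one. The same derivation in isolation; the endpoint value is assumed, and the sketch imports urlparse from the Python 3 stdlib rather than through the compatibility wrapper the plugin uses:

    from urllib.parse import urlparse

    endpoint = 'https://cloudstack.example.com/client/api'
    parts = urlparse(endpoint)

    host = parts.hostname
    port = parts.port or {'http': 80, 'https': 443}[parts.scheme]
    print(host, port)  # cloudstack.example.com 443
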
diff --git a/test/lib/ansible_test/_internal/cloud/fallaxy.py b/test/lib/ansible_test/_internal/cloud/fallaxy.py
new file mode 100644
index 00000000..504094bd
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/fallaxy.py
@@ -0,0 +1,177 @@
+"""Fallaxy (ansible-galaxy) plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import uuid
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+)
+
+
+class FallaxyProvider(CloudProvider):
+ """Fallaxy plugin.
+
+    Sets up a Fallaxy (ansible-galaxy) stub server for tests.
+
+    Its source resides at: https://github.com/ansible/fallaxy-test-container
+ """
+
+ DOCKER_SIMULATOR_NAME = 'fallaxy-stub'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(FallaxyProvider, self).__init__(args)
+
+ if os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_FALLAXY_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/fallaxy-test-container:2.0.1'
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+        """Filter out the tests when the necessary config and resources are not available.
+
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker_cmd = 'docker'
+ docker = find_executable(docker_cmd, required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "%s" command: %s'
+ % (skip.rstrip('/'), docker_cmd, ', '.join(skipped)))
+
+ def setup(self):
+        """Set up the cloud resource before delegation and register a cleanup callback."""
+ super(FallaxyProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get additional options needed when delegating tests to a container.
+
+ :rtype: list[str]
+ """
+ return ['--link', self.DOCKER_SIMULATOR_NAME] if self.managed else []
+
+ def cleanup(self):
+        """Clean up the resource and temporary config files after tests."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(FallaxyProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ container_id = get_docker_container_id()
+
+ if container_id:
+ display.info('Running in docker container: %s' % container_id, verbosity=1)
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ display.info('%s Fallaxy simulator docker container.'
+ % ('Using the existing' if results else 'Starting a new'),
+ verbosity=1)
+
+ fallaxy_port = 8080
+ fallaxy_token = str(uuid.uuid4()).replace('-', '')
+
+ if not results:
+ if self.args.docker or container_id:
+ publish_ports = []
+ else:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', ':'.join((str(fallaxy_port),) * 2),
+ ]
+
+ if not os.environ.get('ANSIBLE_FALLAXY_CONTAINER'):
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name, '-e', 'FALLAXY_TOKEN=%s' % fallaxy_token] + publish_ports,
+ )
+
+ if self.args.docker:
+ fallaxy_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ fallaxy_host = self._get_simulator_address()
+ display.info('Found Fallaxy simulator container address: %s' % fallaxy_host, verbosity=1)
+ else:
+ fallaxy_host = 'localhost'
+
+ self._set_cloud_config('FALLAXY_HOST', fallaxy_host)
+ self._set_cloud_config('FALLAXY_PORT', str(fallaxy_port))
+ self._set_cloud_config('FALLAXY_TOKEN', fallaxy_token)
+
+ def _get_simulator_address(self):
+ results = docker_inspect(self.args, self.container_name)
+ ipaddress = results[0]['NetworkSettings']['IPAddress']
+ return ipaddress
+
+ def _setup_static(self):
+ raise NotImplementedError()
+
+
+class FallaxyEnvironment(CloudEnvironment):
+ """Fallaxy environment plugin.
+
+ Updates integration test environment after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ fallaxy_token = self._get_cloud_config('FALLAXY_TOKEN')
+ fallaxy_host = self._get_cloud_config('FALLAXY_HOST')
+ fallaxy_port = self._get_cloud_config('FALLAXY_PORT')
+
+ return CloudEnvironmentConfig(
+ ansible_vars=dict(
+ fallaxy_token=fallaxy_token,
+ fallaxy_galaxy_server='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
+ fallaxy_ah_server='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
+ ),
+ env_vars=dict(
+ FALLAXY_TOKEN=fallaxy_token,
+ FALLAXY_GALAXY_SERVER='http://%s:%s/api/' % (fallaxy_host, fallaxy_port),
+ FALLAXY_AH_SERVER='http://%s:%s/api/automation-hub/' % (fallaxy_host, fallaxy_port),
+ ),
+ )
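
The Fallaxy token above is a UUID4 with the dashes stripped; uuid.uuid4().hex yields the same form directly (shown as an equivalence, not a suggested change to the plugin):

    import uuid

    token = uuid.uuid4()
    assert str(token).replace('-', '') == token.hex
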
diff --git a/test/lib/ansible_test/_internal/cloud/foreman.py b/test/lib/ansible_test/_internal/cloud/foreman.py
new file mode 100644
index 00000000..7517f1f6
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/foreman.py
@@ -0,0 +1,191 @@
+"""Foreman plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class ForemanProvider(CloudProvider):
+ """Foreman plugin.
+
+    Sets up a Foreman stub server for tests.
+ """
+
+ DOCKER_SIMULATOR_NAME = 'foreman-stub'
+
+ DOCKER_IMAGE = 'quay.io/ansible/foreman-test-container:1.4.0'
+ """Default image to run Foreman stub from.
+
+ The simulator must be pinned to a specific version
+ to guarantee CI passes with the version used.
+
+    Its source resides at:
+    https://github.com/ansible/foreman-test-container
+ """
+
+ def __init__(self, args):
+ """Set up container references for provider.
+
+ :type args: TestConfig
+ """
+ super(ForemanProvider, self).__init__(args)
+
+ self.__container_from_env = os.environ.get('ANSIBLE_FRMNSIM_CONTAINER')
+        """Overrides the target container; may be used for development.
+
+        Set ANSIBLE_FRMNSIM_CONTAINER to the image you want to use instead.
+        Omit it or leave it empty otherwise.
+ """
+
+ self.image = self.__container_from_env or self.DOCKER_IMAGE
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+        """Filter out the tests when the necessary config and resources are not available.
+
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker_cmd = 'docker'
+ docker = find_executable(docker_cmd, required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning(
+ 'Excluding tests marked "%s" '
+ 'which require the "%s" command: %s'
+ % (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
+ )
+
+ def setup(self):
+        """Set up the cloud resource before delegation and register a cleanup callback."""
+ super(ForemanProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get additional options needed when delegating tests to a container.
+
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+        """Clean up the resource and temporary config files after tests."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(ForemanProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Spawn a Foreman stub within docker container."""
+ foreman_port = 8080
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ display.info(
+ '%s Foreman simulator docker container.'
+ % ('Using the existing' if results else 'Starting a new'),
+ verbosity=1,
+ )
+
+ if not results:
+ if self.args.docker or container_id:
+ publish_ports = []
+ else:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', ':'.join((str(foreman_port), ) * 2),
+ ]
+
+ if not self.__container_from_env:
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ foreman_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ foreman_host = self._get_simulator_address()
+ display.info(
+ 'Found Foreman simulator container address: %s'
+ % foreman_host, verbosity=1
+ )
+ else:
+ foreman_host = get_docker_hostname()
+
+ self._set_cloud_config('FOREMAN_HOST', foreman_host)
+ self._set_cloud_config('FOREMAN_PORT', str(foreman_port))
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ raise NotImplementedError
+
+
+class ForemanEnvironment(CloudEnvironment):
+ """Foreman environment plugin.
+
+ Updates integration test environment after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ env_vars = dict(
+ FOREMAN_HOST=self._get_cloud_config('FOREMAN_HOST'),
+ FOREMAN_PORT=self._get_cloud_config('FOREMAN_PORT'),
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ )
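
The port-publishing expression in _setup_dynamic above is terse; here is a
minimal standalone sketch (illustrative values only, not part of the patch)
of what it passes to docker run:

    # Sketch of ForemanProvider._setup_dynamic's publish-ports logic.
    foreman_port = 8080

    # ':'.join((str(port),) * 2) repeats the port on both sides of the
    # colon, i.e. '8080:8080', mapping the simulator port to the same
    # host port.
    publish_ports = ['-p', ':'.join((str(foreman_port),) * 2)]
    assert publish_ports == ['-p', '8080:8080']

    # When already inside docker (or when using --docker), nothing is
    # published; consumers reach the simulator via --link or its
    # bridge-network address instead.
    running_inside_docker = True  # hypothetical condition
    if running_inside_docker:
        publish_ports = []
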
diff --git a/test/lib/ansible_test/_internal/cloud/gcp.py b/test/lib/ansible_test/_internal/cloud/gcp.py
new file mode 100644
index 00000000..c8de1835
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/gcp.py
@@ -0,0 +1,62 @@
+# Copyright: (c) 2018, Google Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""GCP plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util import (
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+
+class GcpCloudProvider(CloudProvider):
+ """GCP cloud provider plugin. Sets up cloud resources before delegation."""
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(GcpCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(GcpCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ display.notice(
+                'Static configuration could not be used. Are you missing a template file?'
+ )
+
+
+class GcpCloudEnvironment(CloudEnvironment):
+ """GCP cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
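
The environment class above folds the [default] section of the static config
file into the Ansible variables. A minimal sketch of that merge, using
Python 3's configparser and hypothetical keys:

    from configparser import ConfigParser

    parser = ConfigParser()
    parser.read_string('[default]\ngcp_project = example-project\ngcp_cred_file = /path/to/creds.json\n')

    ansible_vars = dict(resource_prefix='ansible-test-example')
    ansible_vars.update(dict(parser.items('default')))

    assert ansible_vars['gcp_project'] == 'example-project'
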
diff --git a/test/lib/ansible_test/_internal/cloud/hcloud.py b/test/lib/ansible_test/_internal/cloud/hcloud.py
new file mode 100644
index 00000000..5902b566
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/hcloud.py
@@ -0,0 +1,116 @@
+"""Hetzner Cloud plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util import (
+ display,
+ ConfigParser,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class HcloudCloudProvider(CloudProvider):
+ """Hetzner Cloud provider plugin. Sets up cloud resources before
+ delegation.
+ """
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(HcloudCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = self._create_ansible_core_ci()
+
+ if aci.available:
+ return
+
+ super(HcloudCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(HcloudCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self):
+ """Request Hetzner credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ config = self._read_config_template()
+
+ aci = self._create_ansible_core_ci()
+
+ response = aci.start()
+
+ if not self.args.explain:
+ token = response['hetzner']['token']
+
+ display.sensitive.add(token)
+ display.info('Hetzner Cloud Token: %s' % token, verbosity=1)
+
+ values = dict(
+ TOKEN=token,
+ )
+
+ display.sensitive.add(values['TOKEN'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+ def _create_ansible_core_ci(self):
+ """
+ :rtype: AnsibleCoreCI
+ """
+ return AnsibleCoreCI(self.args, 'hetzner', 'hetzner', persist=False, stage=self.args.remote_stage, provider=self.args.remote_provider)
+
+
+class HcloudCloudEnvironment(CloudEnvironment):
+ """Hetzner Cloud cloud environment plugin. Updates integration test environment
+ after delegation.
+ """
+
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ HCLOUD_TOKEN=parser.get('default', 'hcloud_api_token'),
+ )
+
+ display.sensitive.add(env_vars['HCLOUD_TOKEN'])
+
+ ansible_vars = dict(
+ hcloud_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
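
Note that the Hetzner environment exposes the same token twice: upper-case
for environment variables and lower-case for Ansible variables. A small
sketch with an illustrative token value:

    env_vars = dict(HCLOUD_TOKEN='example-token')  # illustrative value

    ansible_vars = dict(hcloud_prefix='ansible-test-example')
    ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))

    # playbooks reference the lower-cased name
    assert ansible_vars['hcloud_token'] == 'example-token'
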
diff --git a/test/lib/ansible_test/_internal/cloud/nios.py b/test/lib/ansible_test/_internal/cloud/nios.py
new file mode 100644
index 00000000..b9a1a4e4
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/nios.py
@@ -0,0 +1,193 @@
+"""NIOS plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class NiosProvider(CloudProvider):
+ """Nios plugin.
+
+    Sets up a NIOS mock server for tests.
+ """
+
+ DOCKER_SIMULATOR_NAME = 'nios-simulator'
+
+ DOCKER_IMAGE = 'quay.io/ansible/nios-test-container:1.3.0'
+ """Default image to run the nios simulator.
+
+ The simulator must be pinned to a specific version
+ to guarantee CI passes with the version used.
+
+    Its source resides at:
+ https://github.com/ansible/nios-test-container
+ """
+
+ def __init__(self, args):
+ """Set up container references for provider.
+
+ :type args: TestConfig
+ """
+ super(NiosProvider, self).__init__(args)
+
+ self.__container_from_env = os.environ.get('ANSIBLE_NIOSSIM_CONTAINER')
+ """Overrides target container, might be used for development.
+
+ Use ANSIBLE_NIOSSIM_CONTAINER=whatever_you_want if you want
+ to use other image. Omit/empty otherwise.
+ """
+
+ self.image = self.__container_from_env or self.DOCKER_IMAGE
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the tests with the necessary config and res unavailable.
+
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ docker_cmd = 'docker'
+ docker = find_executable(docker_cmd, required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning(
+ 'Excluding tests marked "%s" '
+ 'which require the "%s" command: %s'
+ % (skip.rstrip('/'), docker_cmd, ', '.join(skipped))
+ )
+
+ def setup(self):
+ """Setup cloud resource before delegation and reg cleanup callback."""
+ super(NiosProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_docker_run_options(self):
+ """Get additional options needed when delegating tests to a container.
+
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the resource and temporary configs files after tests."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(NiosProvider, self).cleanup()
+
+ def _setup_dynamic(self):
+ """Spawn a NIOS simulator within docker container."""
+ nios_port = 443
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ display.info(
+ '%s NIOS simulator docker container.'
+ % ('Using the existing' if results else 'Starting a new'),
+ verbosity=1,
+ )
+
+ if not results:
+ if self.args.docker or container_id:
+ publish_ports = []
+ else:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', ':'.join((str(nios_port), ) * 2),
+ ]
+
+ if not self.__container_from_env:
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ nios_host = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ nios_host = self._get_simulator_address()
+ display.info(
+ 'Found NIOS simulator container address: %s'
+ % nios_host, verbosity=1
+ )
+ else:
+ nios_host = get_docker_hostname()
+
+ self._set_cloud_config('NIOS_HOST', nios_host)
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ raise NotImplementedError
+
+
+class NiosEnvironment(CloudEnvironment):
+ """NIOS environment plugin.
+
+ Updates integration test environment after delegation.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ ansible_vars = dict(
+ nios_provider=dict(
+ host=self._get_cloud_config('NIOS_HOST'),
+ username='admin',
+ password='infoblox',
+ ),
+ )
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
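
The filter method above excludes targets by alias when the docker command is
unavailable. A self-contained sketch of that selection, with TestTarget
stubbed as a namedtuple (the real class lives elsewhere in ansible-test):

    import collections

    TestTarget = collections.namedtuple('TestTarget', ['name', 'aliases'])

    targets = (
        TestTarget('nios_a_record', ['cloud/nios/', 'destructive/']),
        TestTarget('ping', ['posix/ci/']),
    )

    skip = 'cloud/%s/' % 'nios'
    skipped = [target.name for target in targets if skip in target.aliases]

    # only targets carrying the cloud/nios/ alias are excluded
    assert skipped == ['nios_a_record']
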
diff --git a/test/lib/ansible_test/_internal/cloud/opennebula.py b/test/lib/ansible_test/_internal/cloud/opennebula.py
new file mode 100644
index 00000000..559093e3
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/opennebula.py
@@ -0,0 +1,66 @@
+"""OpenNebula plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ display,
+ ConfigParser,
+)
+
+
+class OpenNebulaCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+
+ def filter(self, targets, exclude):
+ """ no need to filter modules, they can either run from config file or from fixtures"""
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(OpenNebulaCloudProvider, self).setup()
+
+ if not self._use_static_config():
+ self._setup_dynamic()
+
+ def _setup_dynamic(self):
+        display.info('No config file provided, will run tests from fixtures')
+
+ config = self._read_config_template()
+ values = dict(
+ URL="http://localhost/RPC2",
+ USERNAME='oneadmin',
+ PASSWORD='onepass',
+ FIXTURES='true',
+ REPLAY='true',
+ )
+ config = self._populate_config_template(config, values)
+ self._write_config(config)
+
+
+class OpenNebulaCloudEnvironment(CloudEnvironment):
+ """
+    Updates the integration test environment after delegation. Sets up the config file as a parameter.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+
+ ansible_vars.update(dict(parser.items('default')))
+
+ display.sensitive.add(ansible_vars.get('opennebula_password'))
+
+ return CloudEnvironmentConfig(
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/cloud/openshift.py b/test/lib/ansible_test/_internal/cloud/openshift.py
new file mode 100644
index 00000000..450816bf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/openshift.py
@@ -0,0 +1,236 @@
+"""OpenShift plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import time
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ find_executable,
+ ApplicationError,
+ display,
+ SubprocessError,
+)
+
+from ..http import (
+ HttpClient,
+)
+
+from ..docker_util import (
+ docker_exec,
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ docker_network_inspect,
+ get_docker_container_id,
+ get_docker_preferred_network_name,
+ get_docker_hostname,
+ is_docker_user_defined_network,
+)
+
+
+class OpenShiftCloudProvider(CloudProvider):
+ """OpenShift cloud provider plugin. Sets up cloud resources before delegation."""
+ DOCKER_CONTAINER_NAME = 'openshift-origin'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(OpenShiftCloudProvider, self).__init__(args, config_extension='.kubeconfig')
+
+ # The image must be pinned to a specific version to guarantee CI passes with the version used.
+ self.image = 'openshift/origin:v3.9.0'
+ self.container_name = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(OpenShiftCloudProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def get_remote_ssh_options(self):
+ """Get any additional options needed when delegating tests to a remote instance via SSH.
+ :rtype: list[str]
+ """
+ if self.managed:
+ return ['-R', '8443:%s:8443' % get_docker_hostname()]
+
+ return []
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_CONTAINER_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(OpenShiftCloudProvider, self).cleanup()
+
+ def _setup_static(self):
+ """Configure OpenShift tests for use with static configuration."""
+ config = read_text_file(self.config_static_path)
+
+ match = re.search(r'^ *server: (?P<server>.*)$', config, flags=re.MULTILINE)
+
+ if match:
+ endpoint = match.group('server')
+ self._wait_for_service(endpoint)
+ else:
+ display.warning('Could not find OpenShift endpoint in kubeconfig. Skipping check for OpenShift service availability.')
+
+ def _setup_dynamic(self):
+ """Create a OpenShift container using docker."""
+ self.container_name = self.DOCKER_CONTAINER_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0]['State']['Running']:
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing OpenShift docker container.', verbosity=1)
+ else:
+ display.info('Starting a new OpenShift docker container.', verbosity=1)
+ docker_pull(self.args, self.image)
+ cmd = ['start', 'master', '--listen', 'https://0.0.0.0:8443']
+ docker_run(self.args, self.image, ['-d', '-p', '8443:8443', '--name', self.container_name], cmd)
+
+ container_id = get_docker_container_id()
+
+ if container_id:
+ host = self._get_container_address()
+ display.info('Found OpenShift container address: %s' % host, verbosity=1)
+ else:
+ host = get_docker_hostname()
+
+ port = 8443
+ endpoint = 'https://%s:%s/' % (host, port)
+
+ self._wait_for_service(endpoint)
+
+ if self.args.explain:
+ config = '# Unknown'
+ else:
+ if self.args.docker:
+ host = self.DOCKER_CONTAINER_NAME
+ elif self.args.remote:
+ host = 'localhost'
+
+ server = 'https://%s:%s' % (host, port)
+ config = self._get_config(server)
+
+ self._write_config(config)
+
+ def _get_container_address(self):
+ current_network = get_docker_preferred_network_name(self.args)
+ networks = docker_network_inspect(self.args, current_network)
+
+ try:
+ network = [network for network in networks if network['Name'] == current_network][0]
+ containers = network['Containers']
+ container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_CONTAINER_NAME][0]
+ return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
+ except Exception:
+ display.error('Failed to process the following docker network inspect output:\n%s' %
+ json.dumps(networks, indent=4, sort_keys=True))
+ raise
+
+ def _wait_for_service(self, endpoint):
+ """Wait for the OpenShift service endpoint to accept connections.
+ :type endpoint: str
+ """
+ if self.args.explain:
+ return
+
+ client = HttpClient(self.args, always=True, insecure=True)
+
+ for dummy in range(1, 30):
+ display.info('Waiting for OpenShift service: %s' % endpoint, verbosity=1)
+
+ try:
+ client.get(endpoint)
+ return
+ except SubprocessError:
+ pass
+
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for OpenShift service.')
+
+ def _get_config(self, server):
+ """Get OpenShift config from container.
+ :type server: str
+ :rtype: dict[str, str]
+ """
+ cmd = ['cat', '/var/lib/origin/openshift.local.config/master/admin.kubeconfig']
+
+ stdout, dummy = docker_exec(self.args, self.container_name, cmd, capture=True)
+
+ config = stdout
+ config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
+ config = re.sub(r'^( *)server: .*$', r'\1server: %s' % server, config, flags=re.MULTILINE)
+
+ return config
+
+
+class OpenShiftCloudEnvironment(CloudEnvironment):
+ """OpenShift cloud environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ env_vars = dict(
+ K8S_AUTH_KUBECONFIG=self.config_path,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ )
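
The kubeconfig rewrite in _get_config is the subtle part: certificate
pinning is swapped for insecure-skip-tls-verify and the server address is
re-pointed at the delegated host. A runnable sketch with an illustrative
kubeconfig fragment:

    import re

    config = 'clusters:\n- cluster:\n    certificate-authority-data: REDACTED\n    server: https://127.0.0.1:8443\n'

    server = 'https://openshift-origin:8443'
    config = re.sub(r'^( *)certificate-authority-data: .*$', r'\1insecure-skip-tls-verify: true', config, flags=re.MULTILINE)
    config = re.sub(r'^( *)server: .*$', r'\1server: %s' % server, config, flags=re.MULTILINE)

    assert 'insecure-skip-tls-verify: true' in config
    assert 'server: https://openshift-origin:8443' in config
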
diff --git a/test/lib/ansible_test/_internal/cloud/scaleway.py b/test/lib/ansible_test/_internal/cloud/scaleway.py
new file mode 100644
index 00000000..22abe197
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/scaleway.py
@@ -0,0 +1,72 @@
+"""Scaleway plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ ConfigParser,
+ display,
+)
+
+
+class ScalewayCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(ScalewayCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(ScalewayCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(ScalewayCloudProvider, self).setup()
+
+ if os.path.isfile(self.config_static_path):
+ self.config_path = self.config_static_path
+ self.managed = False
+
+
+class ScalewayCloudEnvironment(CloudEnvironment):
+ """
+    Updates the integration test environment after delegation. Sets up the config file as a parameter.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ SCW_API_KEY=parser.get('default', 'key'),
+ SCW_ORG=parser.get('default', 'org')
+ )
+
+ display.sensitive.add(env_vars['SCW_API_KEY'])
+
+ ansible_vars = dict(
+ scw_org=parser.get('default', 'org'),
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
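
A sketch of how the environment class consumes a static Scaleway config,
with illustrative file contents (Python 3 configparser):

    from configparser import ConfigParser

    parser = ConfigParser()
    parser.read_string('[default]\nkey = example-api-key\norg = example-org-id\n')

    env_vars = dict(
        SCW_API_KEY=parser.get('default', 'key'),
        SCW_ORG=parser.get('default', 'org'),
    )

    assert env_vars['SCW_ORG'] == 'example-org-id'
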
diff --git a/test/lib/ansible_test/_internal/cloud/tower.py b/test/lib/ansible_test/_internal/cloud/tower.py
new file mode 100644
index 00000000..227d170c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/tower.py
@@ -0,0 +1,255 @@
+"""Tower plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from ..util import (
+ display,
+ ApplicationError,
+ SubprocessError,
+ ConfigParser,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..core_ci import (
+ AnsibleCoreCI,
+)
+
+
+class TowerCloudProvider(CloudProvider):
+ """Tower cloud provider plugin. Sets up cloud resources before delegation."""
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(TowerCloudProvider, self).__init__(args)
+
+ self.aci = None
+ self.version = ''
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ aci = get_tower_aci(self.args)
+
+ if aci.available:
+ return
+
+ super(TowerCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(TowerCloudProvider, self).setup()
+
+ if self._use_static_config():
+ self._setup_static()
+ else:
+ self._setup_dynamic()
+
+ def check_tower_version(self, fallback=None):
+ """Check the Tower version being tested and determine the correct CLI version to use.
+ :type fallback: str | None
+ """
+ tower_cli_version_map = {
+ '3.1.5': '3.1.8',
+ '3.2.3': '3.3.0',
+ '3.3.5': '3.3.3',
+ '3.4.3': '3.3.3',
+ '3.6.3': '3.3.8',
+ }
+
+ cli_version = tower_cli_version_map.get(self.version, fallback)
+
+ if not cli_version:
+ raise ApplicationError('Mapping to ansible-tower-cli version required for Tower version: %s' % self.version)
+
+ self._set_cloud_config('tower_cli_version', cli_version)
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ # cleanup on success or failure is not yet supported due to how cleanup is called
+ if self.aci and self.args.remote_terminate == 'always':
+ self.aci.stop()
+
+ super(TowerCloudProvider, self).cleanup()
+
+ def _setup_static(self):
+ config = TowerConfig.parse(self.config_static_path)
+
+ self.version = config.version
+ self.check_tower_version()
+
+ def _setup_dynamic(self):
+ """Request Tower credentials through the Ansible Core CI service."""
+ display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)
+
+ # temporary solution to allow version selection
+ self.version = os.environ.get('TOWER_VERSION', '3.6.3')
+ self.check_tower_version(os.environ.get('TOWER_CLI_VERSION'))
+
+ aci = get_tower_aci(self.args, self.version)
+ aci.start()
+ aci.wait()
+
+ connection = aci.get()
+
+ config = self._read_config_template()
+
+ if not self.args.explain:
+ self.aci = aci
+
+ values = dict(
+ VERSION=self.version,
+ HOST=connection.hostname,
+ USERNAME=connection.username,
+ PASSWORD=connection.password,
+ )
+
+ display.sensitive.add(values['PASSWORD'])
+
+ config = self._populate_config_template(config, values)
+
+ self._write_config(config)
+
+
+class TowerCloudEnvironment(CloudEnvironment):
+ """Tower cloud environment plugin. Updates integration test environment after delegation."""
+ def setup(self):
+ """Setup which should be done once per environment instead of once per test target."""
+ self.setup_cli()
+ self.disable_pendo()
+
+ def setup_cli(self):
+ """Install the correct Tower CLI for the version of Tower being tested."""
+ tower_cli_version = self._get_cloud_config('tower_cli_version')
+
+ display.info('Installing Tower CLI version: %s' % tower_cli_version)
+
+ cmd = self.args.pip_command + ['install', '--disable-pip-version-check', 'ansible-tower-cli==%s' % tower_cli_version]
+
+ run_command(self.args, cmd)
+
+ cmd = ['tower-cli', 'config', 'verify_ssl', 'false']
+ run_command(self.args, cmd, capture=True)
+
+ def disable_pendo(self):
+ """Disable Pendo tracking."""
+        display.info('Disabling Pendo tracking')
+
+ config = TowerConfig.parse(self.config_path)
+
+ # tower-cli does not recognize TOWER_ environment variables
+ cmd = ['tower-cli', 'setting', 'modify', 'PENDO_TRACKING_STATE', 'off',
+ '-h', config.host, '-u', config.username, '-p', config.password]
+
+ attempts = 60
+
+ while True:
+ attempts -= 1
+
+ try:
+ run_command(self.args, cmd, capture=True)
+ return
+ except SubprocessError as ex:
+ if not attempts:
+ raise ApplicationError('Timed out trying to disable Pendo tracking:\n%s' % ex)
+
+ time.sleep(5)
+
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ config = TowerConfig.parse(self.config_path)
+
+ env_vars = config.environment
+
+ ansible_vars = dict((key.lower(), value) for key, value in env_vars.items())
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
+
+
+class TowerConfig:
+ """Tower settings."""
+ def __init__(self, values):
+ self.version = values.get('version')
+ self.host = values.get('host')
+ self.username = values.get('username')
+ self.password = values.get('password')
+
+ if self.password:
+ display.sensitive.add(self.password)
+
+ @property
+ def environment(self):
+ """Tower settings as environment variables.
+ :rtype: dict[str, str]
+ """
+ env = dict(
+ TOWER_VERSION=self.version,
+ TOWER_HOST=self.host,
+ TOWER_USERNAME=self.username,
+ TOWER_PASSWORD=self.password,
+ )
+
+ return env
+
+ @staticmethod
+ def parse(path):
+ """
+ :type path: str
+ :rtype: TowerConfig
+ """
+ parser = ConfigParser()
+ parser.read(path)
+
+ keys = (
+ 'version',
+ 'host',
+ 'username',
+ 'password',
+ )
+
+ values = dict((k, parser.get('default', k)) for k in keys)
+ config = TowerConfig(values)
+
+ missing = [k for k in keys if not values.get(k)]
+
+ if missing:
+ raise ApplicationError('Missing or empty Tower configuration value(s): %s' % ', '.join(missing))
+
+ return config
+
+
+def get_tower_aci(args, version=None):
+ """
+ :type args: EnvironmentConfig
+ :type version: str | None
+ :rtype: AnsibleCoreCI
+ """
+ if version:
+ persist = True
+ else:
+ version = ''
+ persist = False
+
+ return AnsibleCoreCI(args, 'tower', version, persist=persist, stage=args.remote_stage, provider=args.remote_provider)
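
check_tower_version resolves the tower-cli version from a fixed map, falling
back to TOWER_CLI_VERSION when the Tower version is unmapped. A standalone
sketch (ValueError stands in for ansible-test's ApplicationError):

    tower_cli_version_map = {
        '3.1.5': '3.1.8',
        '3.2.3': '3.3.0',
        '3.3.5': '3.3.3',
        '3.4.3': '3.3.3',
        '3.6.3': '3.3.8',
    }

    def resolve_cli_version(tower_version, fallback=None):
        cli_version = tower_cli_version_map.get(tower_version, fallback)

        if not cli_version:
            raise ValueError('no ansible-tower-cli mapping for Tower %s' % tower_version)

        return cli_version

    assert resolve_cli_version('3.6.3') == '3.3.8'            # mapped version
    assert resolve_cli_version('9.9.9', '3.3.8') == '3.3.8'   # TOWER_CLI_VERSION fallback
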
diff --git a/test/lib/ansible_test/_internal/cloud/vcenter.py b/test/lib/ansible_test/_internal/cloud/vcenter.py
new file mode 100644
index 00000000..3b38a19e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/vcenter.py
@@ -0,0 +1,232 @@
+"""VMware vCenter plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ find_executable,
+ display,
+ ConfigParser,
+ ApplicationError,
+)
+
+from ..docker_util import (
+ docker_run,
+ docker_rm,
+ docker_inspect,
+ docker_pull,
+ get_docker_container_id,
+ get_docker_hostname,
+ get_docker_container_ip,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+
+class VcenterProvider(CloudProvider):
+ """VMware vcenter/esx plugin. Sets up cloud resources for tests."""
+ DOCKER_SIMULATOR_NAME = 'vcenter-simulator'
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(VcenterProvider, self).__init__(args)
+
+ # The simulator must be pinned to a specific version to guarantee CI passes with the version used.
+ if os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
+ self.image = os.environ.get('ANSIBLE_VCSIM_CONTAINER')
+ else:
+ self.image = 'quay.io/ansible/vcenter-test-container:1.7.0'
+ self.container_name = ''
+
+ # VMware tests can be run on govcsim or BYO with a static config file.
+ # The simulator is the default if no config is provided.
+ self.vmware_test_platform = os.environ.get('VMWARE_TEST_PLATFORM', 'govcsim')
+ self.insecure = False
+ self.proxy = None
+ self.platform = 'vcenter'
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if self.vmware_test_platform == 'govcsim' or (self.vmware_test_platform == '' and not os.path.isfile(self.config_static_path)):
+ docker = find_executable('docker', required=False)
+
+ if docker:
+ return
+
+ skip = 'cloud/%s/' % self.platform
+ skipped = [target.name for target in targets if skip in target.aliases]
+
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
+ % (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
+ elif self.vmware_test_platform == 'static':
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(VcenterProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(VcenterProvider, self).setup()
+
+ self._set_cloud_config('vmware_test_platform', self.vmware_test_platform)
+ if self.vmware_test_platform == 'govcsim':
+ self._setup_dynamic_simulator()
+ self.managed = True
+ elif self.vmware_test_platform == 'static':
+ self._use_static_config()
+ self._setup_static()
+ else:
+ raise ApplicationError('Unknown vmware_test_platform: %s' % self.vmware_test_platform)
+
+ def get_docker_run_options(self):
+ """Get any additional options needed when delegating tests to a docker container.
+ :rtype: list[str]
+ """
+ network = get_docker_preferred_network_name(self.args)
+
+ if self.managed and not is_docker_user_defined_network(network):
+ return ['--link', self.DOCKER_SIMULATOR_NAME]
+
+ return []
+
+ def cleanup(self):
+ """Clean up the cloud resource and any temporary configuration files after tests complete."""
+ if self.container_name:
+ docker_rm(self.args, self.container_name)
+
+ super(VcenterProvider, self).cleanup()
+
+ def _setup_dynamic_simulator(self):
+ """Create a vcenter simulator using docker."""
+ container_id = get_docker_container_id()
+
+ self.container_name = self.DOCKER_SIMULATOR_NAME
+
+ results = docker_inspect(self.args, self.container_name)
+
+ if results and not results[0].get('State', {}).get('Running'):
+ docker_rm(self.args, self.container_name)
+ results = []
+
+ if results:
+ display.info('Using the existing vCenter simulator docker container.', verbosity=1)
+ else:
+ display.info('Starting a new vCenter simulator docker container.', verbosity=1)
+
+ if not self.args.docker and not container_id:
+ # publish the simulator ports when not running inside docker
+ publish_ports = [
+ '-p', '1443:443',
+ '-p', '8080:8080',
+ '-p', '8989:8989',
+ '-p', '5000:5000', # control port for flask app in simulator
+ ]
+ else:
+ publish_ports = []
+
+ if not os.environ.get('ANSIBLE_VCSIM_CONTAINER'):
+ docker_pull(self.args, self.image)
+
+ docker_run(
+ self.args,
+ self.image,
+ ['-d', '--name', self.container_name] + publish_ports,
+ )
+
+ if self.args.docker:
+ vcenter_hostname = self.DOCKER_SIMULATOR_NAME
+ elif container_id:
+ vcenter_hostname = self._get_simulator_address()
+ display.info('Found vCenter simulator container address: %s' % vcenter_hostname, verbosity=1)
+ else:
+ vcenter_hostname = get_docker_hostname()
+
+ self._set_cloud_config('vcenter_hostname', vcenter_hostname)
+
+ def _get_simulator_address(self):
+ return get_docker_container_ip(self.args, self.container_name)
+
+ def _setup_static(self):
+ if not os.path.exists(self.config_static_path):
+ raise ApplicationError('Configuration file does not exist: %s' % self.config_static_path)
+
+ parser = ConfigParser({
+ 'vcenter_port': '443',
+ 'vmware_proxy_host': '',
+ 'vmware_proxy_port': '8080'})
+ parser.read(self.config_static_path)
+
+ if parser.get('DEFAULT', 'vmware_validate_certs').lower() in ('no', 'false'):
+ self.insecure = True
+ proxy_host = parser.get('DEFAULT', 'vmware_proxy_host')
+ proxy_port = int(parser.get('DEFAULT', 'vmware_proxy_port'))
+ if proxy_host and proxy_port:
+ self.proxy = 'http://%s:%d' % (proxy_host, proxy_port)
+
+
+class VcenterEnvironment(CloudEnvironment):
+ """VMware vcenter/esx environment plugin. Updates integration test environment after delegation."""
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ try:
+            # We may be in a container, so we cannot simply read VMWARE_TEST_PLATFORM;
+            # we use try/except instead.
+ parser = ConfigParser()
+ parser.read(self.config_path) # static
+
+ env_vars = dict()
+ ansible_vars = dict(
+ resource_prefix=self.resource_prefix,
+ )
+ ansible_vars.update(dict(parser.items('DEFAULT', raw=True)))
+ except KeyError: # govcsim
+ env_vars = dict(
+ VCENTER_HOSTNAME=self._get_cloud_config('vcenter_hostname'),
+ VCENTER_USERNAME='user',
+ VCENTER_PASSWORD='pass',
+ )
+
+ ansible_vars = dict(
+ vcsim=self._get_cloud_config('vcenter_hostname'),
+ vcenter_hostname=self._get_cloud_config('vcenter_hostname'),
+ vcenter_username='user',
+ vcenter_password='pass',
+ )
+            # Shippable starts ansible-test from within an existing container,
+ # and in this case, we don't have to change the vcenter port.
+ if not self.args.docker and not get_docker_container_id():
+ ansible_vars['vcenter_port'] = '1443'
+
+ for key, value in ansible_vars.items():
+ if key.endswith('_password'):
+ display.sensitive.add(value)
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ module_defaults={
+ 'group/vmware': {
+ 'hostname': ansible_vars['vcenter_hostname'],
+ 'username': ansible_vars['vcenter_username'],
+ 'password': ansible_vars['vcenter_password'],
+ 'port': ansible_vars.get('vcenter_port', '443'),
+ 'validate_certs': ansible_vars.get('vmware_validate_certs', 'no'),
+ },
+ },
+ )
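
_setup_static relies on ConfigParser constructor defaults to fill in missing
options before deriving the insecure flag and proxy URL. A runnable sketch
with illustrative ini content (Python 3 configparser):

    from configparser import ConfigParser

    parser = ConfigParser({
        'vcenter_port': '443',
        'vmware_proxy_host': '',
        'vmware_proxy_port': '8080'})
    parser.read_string('[DEFAULT]\nvmware_validate_certs = no\nvmware_proxy_host = proxy.example.com\n')

    insecure = parser.get('DEFAULT', 'vmware_validate_certs').lower() in ('no', 'false')
    proxy_host = parser.get('DEFAULT', 'vmware_proxy_host')
    proxy_port = int(parser.get('DEFAULT', 'vmware_proxy_port'))  # '8080' comes from the constructor defaults
    proxy = 'http://%s:%d' % (proxy_host, proxy_port) if proxy_host and proxy_port else None

    assert insecure and proxy == 'http://proxy.example.com:8080'
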
diff --git a/test/lib/ansible_test/_internal/cloud/vultr.py b/test/lib/ansible_test/_internal/cloud/vultr.py
new file mode 100644
index 00000000..ce6184f7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/cloud/vultr.py
@@ -0,0 +1,71 @@
+"""Vultr plugin for integration tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import (
+ CloudProvider,
+ CloudEnvironment,
+ CloudEnvironmentConfig,
+)
+
+from ..util import (
+ ConfigParser,
+ display,
+)
+
+
+class VultrCloudProvider(CloudProvider):
+ """Checks if a configuration file has been passed or fixtures are going to be used for testing"""
+
+ def __init__(self, args):
+ """
+ :type args: TestConfig
+ """
+ super(VultrCloudProvider, self).__init__(args)
+
+ def filter(self, targets, exclude):
+ """Filter out the cloud tests when the necessary config and resources are not available.
+ :type targets: tuple[TestTarget]
+ :type exclude: list[str]
+ """
+ if os.path.isfile(self.config_static_path):
+ return
+
+ super(VultrCloudProvider, self).filter(targets, exclude)
+
+ def setup(self):
+ """Setup the cloud resource before delegation and register a cleanup callback."""
+ super(VultrCloudProvider, self).setup()
+
+ if os.path.isfile(self.config_static_path):
+ self.config_path = self.config_static_path
+ self.managed = False
+
+
+class VultrCloudEnvironment(CloudEnvironment):
+ """
+    Updates the integration test environment after delegation. Sets up the config file as a parameter.
+ """
+ def get_environment_config(self):
+ """
+ :rtype: CloudEnvironmentConfig
+ """
+ parser = ConfigParser()
+ parser.read(self.config_path)
+
+ env_vars = dict(
+ VULTR_API_KEY=parser.get('default', 'key'),
+ )
+
+ display.sensitive.add(env_vars['VULTR_API_KEY'])
+
+ ansible_vars = dict(
+ vultr_resource_prefix=self.resource_prefix,
+ )
+
+ return CloudEnvironmentConfig(
+ env_vars=env_vars,
+ ansible_vars=ansible_vars,
+ )
diff --git a/test/lib/ansible_test/_internal/config.py b/test/lib/ansible_test/_internal/config.py
new file mode 100644
index 00000000..a3c31959
--- /dev/null
+++ b/test/lib/ansible_test/_internal/config.py
@@ -0,0 +1,356 @@
+"""Configuration classes."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from . import types as t
+
+from .util import (
+ find_python,
+ generate_pip_command,
+ ApplicationError,
+)
+
+from .util_common import (
+ docker_qualify_image,
+ get_docker_completion,
+ get_remote_completion,
+ CommonConfig,
+)
+
+from .metadata import (
+ Metadata,
+)
+
+from .data import (
+ data_context,
+)
+
+try:
+ TIntegrationConfig = t.TypeVar('TIntegrationConfig', bound='IntegrationConfig')
+except AttributeError:
+ TIntegrationConfig = None # pylint: disable=invalid-name
+
+
+class ParsedRemote:
+ """A parsed version of a "remote" string."""
+ def __init__(self, arch, platform, version): # type: (t.Optional[str], str, str) -> None
+ self.arch = arch
+ self.platform = platform
+ self.version = version
+
+ @staticmethod
+ def parse(value): # type: (str) -> t.Optional['ParsedRemote']
+ """Return a ParsedRemote from the given value or None if the syntax is invalid."""
+ parts = value.split('/')
+
+ if len(parts) == 2:
+ arch = None
+ platform, version = parts
+ elif len(parts) == 3:
+ arch, platform, version = parts
+ else:
+ return None
+
+ return ParsedRemote(arch, platform, version)
+
+
+class EnvironmentConfig(CommonConfig):
+ """Configuration common to all commands which execute in an environment."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ super(EnvironmentConfig, self).__init__(args, command)
+
+ self.local = args.local is True
+ self.venv = args.venv
+ self.venv_system_site_packages = args.venv_system_site_packages
+
+ self.python = args.python if 'python' in args else None # type: str
+
+ self.docker = docker_qualify_image(args.docker) # type: str
+ self.docker_raw = args.docker # type: str
+ self.remote = args.remote # type: str
+
+ if self.remote:
+ self.parsed_remote = ParsedRemote.parse(self.remote)
+
+ if not self.parsed_remote or not self.parsed_remote.platform or not self.parsed_remote.version:
+ raise ApplicationError('Unrecognized remote "%s" syntax. Use "platform/version" or "arch/platform/version".' % self.remote)
+ else:
+ self.parsed_remote = None
+
+ self.docker_privileged = args.docker_privileged if 'docker_privileged' in args else False # type: bool
+ self.docker_pull = args.docker_pull if 'docker_pull' in args else False # type: bool
+ self.docker_keep_git = args.docker_keep_git if 'docker_keep_git' in args else False # type: bool
+ self.docker_seccomp = args.docker_seccomp if 'docker_seccomp' in args else None # type: str
+ self.docker_memory = args.docker_memory if 'docker_memory' in args else None
+ self.docker_terminate = args.docker_terminate if 'docker_terminate' in args else None # type: str
+ self.docker_network = args.docker_network if 'docker_network' in args else None # type: str
+
+ if self.docker_seccomp is None:
+ self.docker_seccomp = get_docker_completion().get(self.docker_raw, {}).get('seccomp', 'default')
+
+ self.remote_stage = args.remote_stage # type: str
+ self.remote_provider = args.remote_provider # type: str
+ self.remote_endpoint = args.remote_endpoint # type: t.Optional[str]
+ self.remote_aws_region = args.remote_aws_region # type: str
+ self.remote_terminate = args.remote_terminate # type: str
+
+ if self.remote_provider == 'default':
+ self.remote_provider = None
+
+ self.requirements = args.requirements # type: bool
+
+ if self.python == 'default':
+ self.python = None
+
+ actual_major_minor = '.'.join(str(i) for i in sys.version_info[:2])
+
+ self.python_version = self.python or actual_major_minor
+ self.python_interpreter = args.python_interpreter
+
+ self.pip_check = args.pip_check
+
+ self.delegate = self.docker or self.remote or self.venv
+ self.delegate_args = [] # type: t.List[str]
+
+ if self.delegate:
+ self.requirements = True
+
+ self.inject_httptester = args.inject_httptester if 'inject_httptester' in args else False # type: bool
+ self.httptester = docker_qualify_image(args.httptester if 'httptester' in args else '') # type: str
+
+ if self.get_delegated_completion().get('httptester', 'enabled') == 'disabled':
+ self.httptester = False
+
+ if self.get_delegated_completion().get('pip-check', 'enabled') == 'disabled':
+ self.pip_check = False
+
+ if args.check_python and args.check_python != actual_major_minor:
+ raise ApplicationError('Running under Python %s instead of Python %s as expected.' % (actual_major_minor, args.check_python))
+
+ if self.docker_keep_git:
+ def git_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """Add files from the content root .git directory to the payload file list."""
+ for dirpath, _dirnames, filenames in os.walk(os.path.join(data_context().content.root, '.git')):
+ paths = [os.path.join(dirpath, filename) for filename in filenames]
+ files.extend((path, os.path.relpath(path, data_context().content.root)) for path in paths)
+
+ data_context().register_payload_callback(git_callback)
+
+ @property
+ def python_executable(self):
+ """
+ :rtype: str
+ """
+ return find_python(self.python_version)
+
+ @property
+ def pip_command(self):
+ """
+ :rtype: list[str]
+ """
+ return generate_pip_command(self.python_executable)
+
+ def get_delegated_completion(self):
+ """Returns a dictionary of settings specific to the selected delegation system, if any. Otherwise returns an empty dictionary.
+ :rtype: dict[str, str]
+ """
+ if self.docker:
+ return get_docker_completion().get(self.docker_raw, {})
+
+ if self.remote:
+ return get_remote_completion().get(self.remote, {})
+
+ return {}
+
+
+class TestConfig(EnvironmentConfig):
+ """Configuration common to all test commands."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ super(TestConfig, self).__init__(args, command)
+
+ self.coverage = args.coverage # type: bool
+ self.coverage_label = args.coverage_label # type: str
+ self.coverage_check = args.coverage_check # type: bool
+ self.coverage_config_base_path = None # type: t.Optional[str]
+ self.include = args.include or [] # type: t.List[str]
+ self.exclude = args.exclude or [] # type: t.List[str]
+ self.require = args.require or [] # type: t.List[str]
+
+ self.changed = args.changed # type: bool
+ self.tracked = args.tracked # type: bool
+ self.untracked = args.untracked # type: bool
+ self.committed = args.committed # type: bool
+ self.staged = args.staged # type: bool
+ self.unstaged = args.unstaged # type: bool
+ self.changed_from = args.changed_from # type: str
+ self.changed_path = args.changed_path # type: t.List[str]
+ self.base_branch = args.base_branch # type: str
+
+ self.lint = args.lint if 'lint' in args else False # type: bool
+ self.junit = args.junit if 'junit' in args else False # type: bool
+ self.failure_ok = args.failure_ok if 'failure_ok' in args else False # type: bool
+
+ self.metadata = Metadata.from_file(args.metadata) if args.metadata else Metadata()
+ self.metadata_path = None
+
+ if self.coverage_check:
+ self.coverage = True
+
+ def metadata_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """Add the metadata file to the payload file list."""
+ config = self
+
+ if self.metadata_path:
+ files.append((os.path.abspath(config.metadata_path), config.metadata_path))
+
+ data_context().register_payload_callback(metadata_callback)
+
+
+class ShellConfig(EnvironmentConfig):
+ """Configuration for the shell command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(ShellConfig, self).__init__(args, 'shell')
+
+ self.raw = args.raw # type: bool
+
+ if self.raw:
+ self.httptester = False
+
+
+class SanityConfig(TestConfig):
+ """Configuration for the sanity command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(SanityConfig, self).__init__(args, 'sanity')
+
+ self.test = args.test # type: t.List[str]
+ self.skip_test = args.skip_test # type: t.List[str]
+ self.list_tests = args.list_tests # type: bool
+ self.allow_disabled = args.allow_disabled # type: bool
+ self.enable_optional_errors = args.enable_optional_errors # type: bool
+ self.info_stderr = self.lint
+
+
+class IntegrationConfig(TestConfig):
+ """Configuration for the integration command."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ super(IntegrationConfig, self).__init__(args, command)
+
+ self.start_at = args.start_at # type: str
+ self.start_at_task = args.start_at_task # type: str
+ self.allow_destructive = args.allow_destructive # type: bool
+ self.allow_root = args.allow_root # type: bool
+ self.allow_disabled = args.allow_disabled # type: bool
+ self.allow_unstable = args.allow_unstable # type: bool
+ self.allow_unstable_changed = args.allow_unstable_changed # type: bool
+ self.allow_unsupported = args.allow_unsupported # type: bool
+ self.retry_on_error = args.retry_on_error # type: bool
+ self.continue_on_error = args.continue_on_error # type: bool
+ self.debug_strategy = args.debug_strategy # type: bool
+ self.changed_all_target = args.changed_all_target # type: str
+ self.changed_all_mode = args.changed_all_mode # type: str
+ self.list_targets = args.list_targets # type: bool
+ self.tags = args.tags
+ self.skip_tags = args.skip_tags
+ self.diff = args.diff
+ self.no_temp_workdir = args.no_temp_workdir
+ self.no_temp_unicode = args.no_temp_unicode
+
+ if self.get_delegated_completion().get('temp-unicode', 'enabled') == 'disabled':
+ self.no_temp_unicode = True
+
+ if self.list_targets:
+ self.explain = True
+ self.info_stderr = True
+
+ def get_ansible_config(self): # type: () -> str
+ """Return the path to the Ansible config for the given config."""
+ ansible_config_relative_path = os.path.join(data_context().content.integration_path, '%s.cfg' % self.command)
+ ansible_config_path = os.path.join(data_context().content.root, ansible_config_relative_path)
+
+ if not os.path.exists(ansible_config_path):
+ # use the default empty configuration unless one has been provided
+ ansible_config_path = super(IntegrationConfig, self).get_ansible_config()
+
+ return ansible_config_path
+
+
+class PosixIntegrationConfig(IntegrationConfig):
+ """Configuration for the posix integration command."""
+
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(PosixIntegrationConfig, self).__init__(args, 'integration')
+
+
+class WindowsIntegrationConfig(IntegrationConfig):
+ """Configuration for the windows integration command."""
+
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(WindowsIntegrationConfig, self).__init__(args, 'windows-integration')
+
+ self.windows = args.windows # type: t.List[str]
+ self.inventory = args.inventory # type: str
+
+ if self.windows:
+ self.allow_destructive = True
+
+
+class NetworkIntegrationConfig(IntegrationConfig):
+ """Configuration for the network integration command."""
+
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(NetworkIntegrationConfig, self).__init__(args, 'network-integration')
+
+ self.platform = args.platform # type: t.List[str]
+ self.platform_collection = dict(args.platform_collection or []) # type: t.Dict[str, str]
+ self.platform_connection = dict(args.platform_connection or []) # type: t.Dict[str, str]
+ self.inventory = args.inventory # type: str
+ self.testcase = args.testcase # type: str
+
+
+class UnitsConfig(TestConfig):
+ """Configuration for the units command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(UnitsConfig, self).__init__(args, 'units')
+
+ self.collect_only = args.collect_only # type: bool
+ self.num_workers = args.num_workers # type: int
+
+ self.requirements_mode = args.requirements_mode if 'requirements_mode' in args else ''
+
+ if self.requirements_mode == 'only':
+ self.requirements = True
+ elif self.requirements_mode == 'skip':
+ self.requirements = False
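
ParsedRemote.parse, defined near the top of this file, accepts either a
two-part or a three-part remote name. A minimal re-statement of that logic
for illustration (plain tuples instead of the class):

    def parse_remote(value):
        # two-part: platform/version; three-part: arch/platform/version
        parts = value.split('/')

        if len(parts) == 2:
            arch = None
            platform, version = parts
        elif len(parts) == 3:
            arch, platform, version = parts
        else:
            return None

        return arch, platform, version

    assert parse_remote('freebsd/12.1') == (None, 'freebsd', '12.1')
    assert parse_remote('power/centos/7') == ('power', 'centos', '7')
    assert parse_remote('bad-value') is None
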
diff --git a/test/lib/ansible_test/_internal/constants.py b/test/lib/ansible_test/_internal/constants.py
new file mode 100644
index 00000000..f4307822
--- /dev/null
+++ b/test/lib/ansible_test/_internal/constants.py
@@ -0,0 +1,10 @@
+"""Constants used by ansible-test. Imports should not be used in this file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Setting a low soft RLIMIT_NOFILE value will improve the performance of subprocess.Popen on Python 2.x when close_fds=True.
+# This will affect all Python subprocesses. It will also affect the current Python process if set before subprocess is imported for the first time.
+SOFT_RLIMIT_NOFILE = 1024
+
+# File used to track the ansible-test test execution timeout.
+TIMEOUT_PATH = '.ansible-test-timeout.json'
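
The SOFT_RLIMIT_NOFILE constant is consumed elsewhere in ansible-test;
purely as an illustration of the underlying resource API (POSIX only, not
the patch's own code), a soft cap can be applied like this:

    import resource

    SOFT_RLIMIT_NOFILE = 1024

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

    if soft > SOFT_RLIMIT_NOFILE:
        # lower only the soft limit; the hard limit is left unchanged
        resource.setrlimit(resource.RLIMIT_NOFILE, (SOFT_RLIMIT_NOFILE, hard))
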
diff --git a/test/lib/ansible_test/_internal/core_ci.py b/test/lib/ansible_test/_internal/core_ci.py
new file mode 100644
index 00000000..c984f4fe
--- /dev/null
+++ b/test/lib/ansible_test/_internal/core_ci.py
@@ -0,0 +1,680 @@
+"""Access Ansible Core CI remote services."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+import traceback
+import uuid
+import errno
+import time
+
+from . import types as t
+
+from .http import (
+ HttpClient,
+ HttpResponse,
+ HttpError,
+)
+
+from .io import (
+ make_dirs,
+ read_text_file,
+ write_json_file,
+ write_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ display,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from .util_common import (
+ run_command,
+ ResultType,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .ci import (
+ AuthContext,
+ get_ci_provider,
+)
+
+from .data import (
+ data_context,
+)
+
+AWS_ENDPOINTS = {
+ 'us-east-1': 'https://ansible-core-ci.testing.ansible.com',
+}
+
+
+class AnsibleCoreCI:
+ """Client for Ansible Core CI services."""
+ def __init__(self, args, platform, version, stage='prod', persist=True, load=True, provider=None, arch=None):
+ """
+ :type args: EnvironmentConfig
+ :type platform: str
+ :type version: str
+ :type stage: str
+ :type persist: bool
+ :type load: bool
+ :type provider: str | None
+ :type arch: str | None
+ """
+ self.args = args
+ self.arch = arch
+ self.platform = platform
+ self.version = version
+ self.stage = stage
+ self.client = HttpClient(args)
+ self.connection = None
+ self.instance_id = None
+ self.endpoint = None
+ self.max_threshold = 1
+ self.retries = 3
+ self.ci_provider = get_ci_provider()
+ self.auth_context = AuthContext()
+
+ if self.arch:
+ self.name = '%s-%s-%s' % (self.arch, self.platform, self.version)
+ else:
+ self.name = '%s-%s' % (self.platform, self.version)
+
+ # Assign each supported platform to one provider.
+ # This is used to determine the provider from the platform when no provider is specified.
+ providers = dict(
+ aws=(
+ 'aws',
+ 'windows',
+ 'freebsd',
+ 'vyos',
+ 'junos',
+ 'ios',
+ 'tower',
+ 'rhel',
+ 'hetzner',
+ ),
+ azure=(
+ 'azure',
+ ),
+ ibmps=(
+ 'aix',
+ 'ibmi',
+ ),
+ ibmvpc=(
+ 'centos arch=power', # avoid ibmvpc as default for no-arch centos to avoid making centos default to power
+ ),
+ parallels=(
+ 'macos',
+ 'osx',
+ ),
+ )
+
+ # Currently ansible-core-ci has no concept of arch selection. This effectively means each provider only supports one arch.
+ # The list below identifies which platforms accept an arch, and which one. These platforms can only be used with the specified arch.
+ provider_arches = dict(
+ ibmvpc='power',
+ )
+
+ if provider:
+ # override default provider selection (not all combinations are valid)
+ self.provider = provider
+ else:
+ self.provider = None
+
+ for candidate in providers:
+ choices = [
+ platform,
+ '%s arch=%s' % (platform, arch),
+ ]
+
+ if any(choice in providers[candidate] for choice in choices):
+ # assign default provider based on platform
+ self.provider = candidate
+ break
+
+ # If a provider has been selected, make sure the correct arch (or none) has been selected.
+ if self.provider:
+ required_arch = provider_arches.get(self.provider)
+
+ if self.arch != required_arch:
+ if required_arch:
+ if self.arch:
+ raise ApplicationError('Provider "%s" requires the "%s" arch instead of "%s".' % (self.provider, required_arch, self.arch))
+
+ raise ApplicationError('Provider "%s" requires the "%s" arch.' % (self.provider, required_arch))
+
+ raise ApplicationError('Provider "%s" does not support specification of an arch.' % self.provider)
+
+ self.path = os.path.expanduser('~/.ansible/test/instances/%s-%s-%s' % (self.name, self.provider, self.stage))
+
+ if self.provider in ('aws', 'azure', 'ibmps', 'ibmvpc'):
+ if args.remote_aws_region:
+ display.warning('The --remote-aws-region option is obsolete and will be removed in a future version of ansible-test.')
+ # permit command-line override of region selection
+ region = args.remote_aws_region
+ # use a dedicated CI key when overriding the region selection
+ self.auth_context.region = args.remote_aws_region
+ else:
+ region = 'us-east-1'
+
+ self.path = "%s-%s" % (self.path, region)
+
+ if self.args.remote_endpoint:
+ self.endpoints = (self.args.remote_endpoint,)
+ else:
+ self.endpoints = (AWS_ENDPOINTS[region],)
+
+ self.ssh_key = SshKey(args)
+
+ if self.platform == 'windows':
+ self.port = 5986
+ else:
+ self.port = 22
+
+ if self.provider == 'ibmps':
+            # Additional retries are needed to accommodate images transitioning
+            # to the active state in the IBM cloud. This operation can take up to
+            # 90 seconds.
+ self.retries = 7
+ elif self.provider == 'parallels':
+ if self.args.remote_endpoint:
+ self.endpoints = (self.args.remote_endpoint,)
+ else:
+ self.endpoints = (AWS_ENDPOINTS['us-east-1'],)
+
+ self.ssh_key = SshKey(args)
+ self.port = None
+ else:
+ if self.arch:
+ raise ApplicationError('Provider not detected for platform "%s" on arch "%s".' % (self.platform, self.arch))
+
+ raise ApplicationError('Provider not detected for platform "%s" with no arch specified.' % self.platform)
+
+ if persist and load and self._load():
+ try:
+ display.info('Checking existing %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+
+ self.connection = self.get(always_raise_on=[404])
+
+ display.info('Loaded existing %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
+ except HttpError as ex:
+ if ex.status != 404:
+ raise
+
+ self._clear()
+
+ display.info('Cleared stale %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+
+ self.instance_id = None
+ self.endpoint = None
+ elif not persist:
+ self.instance_id = None
+ self.endpoint = None
+ self._clear()
+
+ if self.instance_id:
+ self.started = True
+ else:
+ self.started = False
+ self.instance_id = str(uuid.uuid4())
+ self.endpoint = None
+
+ display.sensitive.add(self.instance_id)
+
+ def _get_parallels_endpoints(self):
+ """
+ :rtype: tuple[str]
+ """
+ client = HttpClient(self.args, always=True)
+ display.info('Getting available endpoints...', verbosity=1)
+ sleep = 3
+
+ for _iteration in range(1, 10):
+ response = client.get('https://ansible-ci-files.s3.amazonaws.com/ansible-test/parallels-endpoints.txt')
+
+ if response.status_code == 200:
+ endpoints = tuple(response.response.splitlines())
+ display.info('Available endpoints (%d):\n%s' % (len(endpoints), '\n'.join(' - %s' % endpoint for endpoint in endpoints)), verbosity=1)
+ return endpoints
+
+ display.warning('HTTP %d error getting endpoints, trying again in %d seconds.' % (response.status_code, sleep))
+ time.sleep(sleep)
+
+ raise ApplicationError('Unable to get available endpoints.')
+
+ @property
+ def available(self):
+ """Return True if Ansible Core CI is supported."""
+ return self.ci_provider.supports_core_ci_auth(self.auth_context)
+
+ def start(self):
+ """Start instance."""
+ if self.started:
+ display.info('Skipping started %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return None
+
+ return self._start(self.ci_provider.prepare_core_ci_auth(self.auth_context))
+
+ def stop(self):
+ """Stop instance."""
+ if not self.started:
+ display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return
+
+ response = self.client.delete(self._uri)
+
+ if response.status_code == 404:
+ self._clear()
+ display.info('Cleared invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return
+
+ if response.status_code == 200:
+ self._clear()
+ display.info('Stopped running %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return
+
+ raise self._create_http_error(response)
+
+ def get(self, tries=3, sleep=15, always_raise_on=None):
+ """
+ Get instance connection information.
+ :type tries: int
+ :type sleep: int
+ :type always_raise_on: list[int] | None
+ :rtype: InstanceConnection
+ """
+ if not self.started:
+ display.info('Skipping invalid %s/%s instance %s.' % (self.platform, self.version, self.instance_id),
+ verbosity=1)
+ return None
+
+ if not always_raise_on:
+ always_raise_on = []
+
+ if self.connection and self.connection.running:
+ return self.connection
+
+ while True:
+ tries -= 1
+ response = self.client.get(self._uri)
+
+ if response.status_code == 200:
+ break
+
+ error = self._create_http_error(response)
+
+ if not tries or response.status_code in always_raise_on:
+ raise error
+
+ display.warning('%s. Trying again after %d seconds.' % (error, sleep))
+ time.sleep(sleep)
+
+ if self.args.explain:
+ self.connection = InstanceConnection(
+ running=True,
+ hostname='cloud.example.com',
+ port=self.port or 12345,
+ username='username',
+ password='password' if self.platform == 'windows' else None,
+ )
+ else:
+ response_json = response.json()
+ status = response_json['status']
+ con = response_json.get('connection')
+
+ if con:
+ self.connection = InstanceConnection(
+ running=status == 'running',
+ hostname=con['hostname'],
+ port=int(con.get('port', self.port)),
+ username=con['username'],
+ password=con.get('password'),
+ response_json=response_json,
+ )
+ else:
+ self.connection = InstanceConnection(
+ running=status == 'running',
+ response_json=response_json,
+ )
+
+ if self.connection.password:
+ display.sensitive.add(str(self.connection.password))
+
+ status = 'running' if self.connection.running else 'starting'
+
+ display.info('Status update: %s/%s on instance %s is %s.' %
+ (self.platform, self.version, self.instance_id, status),
+ verbosity=1)
+
+ return self.connection
+
+ def wait(self, iterations=90): # type: (int) -> None
+ """Wait for the instance to become ready."""
+ for _iteration in range(1, iterations):
+ if self.get().running:
+ return
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.platform, self.version, self.instance_id))
+
+ @property
+ def _uri(self):
+ return '%s/%s/%s/%s' % (self.endpoint, self.stage, self.provider, self.instance_id)
+
+ def _start(self, auth):
+ """Start instance."""
+ display.info('Initializing new %s/%s instance %s.' % (self.platform, self.version, self.instance_id), verbosity=1)
+
+ if self.platform == 'windows':
+ winrm_config = read_text_file(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'ConfigureRemotingForAnsible.ps1'))
+ else:
+ winrm_config = None
+
+ data = dict(
+ config=dict(
+ platform=self.platform,
+ version=self.version,
+ public_key=self.ssh_key.pub_contents if self.ssh_key else None,
+ query=False,
+ winrm_config=winrm_config,
+ )
+ )
+
+ data.update(dict(auth=auth))
+
+ headers = {
+ 'Content-Type': 'application/json',
+ }
+
+ response = self._start_try_endpoints(data, headers)
+
+ self.started = True
+ self._save()
+
+ display.info('Started %s/%s from: %s' % (self.platform, self.version, self._uri), verbosity=1)
+
+ if self.args.explain:
+ return {}
+
+ return response.json()
+
+ def _start_try_endpoints(self, data, headers):
+ """
+ :type data: dict[str, any]
+ :type headers: dict[str, str]
+ :rtype: HttpResponse
+ """
+ threshold = 1
+
+ while threshold <= self.max_threshold:
+ for self.endpoint in self.endpoints:
+ try:
+ return self._start_at_threshold(data, headers, threshold)
+ except CoreHttpError as ex:
+ if ex.status == 503:
+ display.info('Service Unavailable: %s' % ex.remote_message, verbosity=1)
+ continue
+ display.error(ex.remote_message)
+ except HttpError as ex:
+ display.error(u'%s' % ex)
+
+ time.sleep(3)
+
+ threshold += 1
+
+ raise ApplicationError('Maximum threshold reached and all endpoints exhausted.')
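+
+ # Illustrative flow (hypothetical setup, not from the original source): with two
+ # endpoints and max_threshold=2, attempts proceed as (endpoint1, threshold 1),
+ # (endpoint2, threshold 1), (endpoint1, threshold 2), (endpoint2, threshold 2),
+ # sleeping 3 seconds after each non-503 failure, before the final ApplicationError.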
+
+ def _start_at_threshold(self, data, headers, threshold):
+ """
+ :type data: dict[str, any]
+ :type headers: dict[str, str]
+ :type threshold: int
+ :rtype: HttpResponse | None
+ """
+ tries = self.retries
+ sleep = 15
+
+ data['threshold'] = threshold
+
+ display.info('Trying endpoint: %s (threshold %d)' % (self.endpoint, threshold), verbosity=1)
+
+ while True:
+ tries -= 1
+ response = self.client.put(self._uri, data=json.dumps(data), headers=headers)
+
+ if response.status_code == 200:
+ return response
+
+ error = self._create_http_error(response)
+
+ if response.status_code == 503:
+ raise error
+
+ if not tries:
+ raise error
+
+ display.warning('%s. Trying again after %d seconds.' % (error, sleep))
+ time.sleep(sleep)
+
+ def _clear(self):
+ """Clear instance information."""
+ try:
+ self.connection = None
+ os.remove(self.path)
+ except OSError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+ def _load(self):
+ """Load instance information."""
+ try:
+ data = read_text_file(self.path)
+ except IOError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+ return False
+
+ if not data.startswith('{'):
+ return False # legacy format
+
+ config = json.loads(data)
+
+ return self.load(config)
+
+ def load(self, config):
+ """
+ :type config: dict[str, str]
+ :rtype: bool
+ """
+ self.instance_id = str(config['instance_id'])
+ self.endpoint = config['endpoint']
+ self.started = True
+
+ display.sensitive.add(self.instance_id)
+
+ return True
+
+ def _save(self):
+ """Save instance information."""
+ if self.args.explain:
+ return
+
+ config = self.save()
+
+ write_json_file(self.path, config, create_directories=True)
+
+ def save(self):
+ """
+ :rtype: dict[str, str]
+ """
+ return dict(
+ platform_version='%s/%s' % (self.platform, self.version),
+ instance_id=self.instance_id,
+ endpoint=self.endpoint,
+ )
+
+ @staticmethod
+ def _create_http_error(response):
+ """
+ :type response: HttpResponse
+ :rtype: ApplicationError
+ """
+ response_json = response.json()
+ stack_trace = ''
+
+ if 'message' in response_json:
+ message = response_json['message']
+ elif 'errorMessage' in response_json:
+ message = response_json['errorMessage'].strip()
+ if 'stackTrace' in response_json:
+ traceback_lines = response_json['stackTrace']
+
+ # AWS Lambda on Python 2.7 returns a list of tuples
+ # AWS Lambda on Python 3.7 returns a list of strings
+ if traceback_lines and isinstance(traceback_lines[0], list):
+ traceback_lines = traceback.format_list(traceback_lines)
+
+ trace = '\n'.join([x.rstrip() for x in traceback_lines])
+ stack_trace = ('\nTraceback (from remote server):\n%s' % trace)
+ else:
+ message = str(response_json)
+
+ return CoreHttpError(response.status_code, message, stack_trace)
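+
+ # Illustrative payloads (hypothetical, not from the original source): a body like
+ # {"message": "quota exceeded"} is used verbatim as the message, while a Lambda-style
+ # body such as {"errorMessage": "oops", "stackTrace": ["  File ...", "    raise ..."]}
+ # has the joined traceback appended to the resulting CoreHttpError text.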
+
+
+class CoreHttpError(HttpError):
+ """HTTP response as an error."""
+ def __init__(self, status, remote_message, remote_stack_trace):
+ """
+ :type status: int
+ :type remote_message: str
+ :type remote_stack_trace: str
+ """
+ super(CoreHttpError, self).__init__(status, '%s%s' % (remote_message, remote_stack_trace))
+
+ self.remote_message = remote_message
+ self.remote_stack_trace = remote_stack_trace
+
+
+class SshKey:
+ """Container for SSH key used to connect to remote instances."""
+ KEY_NAME = 'id_rsa'
+ PUB_NAME = 'id_rsa.pub'
+
+ def __init__(self, args):
+ """
+ :type args: EnvironmentConfig
+ """
+ key_pair = self.get_key_pair()
+
+ if not key_pair:
+ key_pair = self.generate_key_pair(args)
+
+ key, pub = key_pair
+ key_dst, pub_dst = self.get_in_tree_key_pair_paths()
+
+ def ssh_key_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """
+ Add the SSH keys to the payload file list.
+ They are either outside the source tree or in the cache dir which is ignored by default.
+ """
+ files.append((key, os.path.relpath(key_dst, data_context().content.root)))
+ files.append((pub, os.path.relpath(pub_dst, data_context().content.root)))
+
+ data_context().register_payload_callback(ssh_key_callback)
+
+ self.key, self.pub = key, pub
+
+ if args.explain:
+ self.pub_contents = None
+ else:
+ self.pub_contents = read_text_file(self.pub).strip()
+
+ def get_in_tree_key_pair_paths(self): # type: () -> t.Tuple[str, str]
+ """Return the ansible-test SSH key pair paths from the content tree."""
+ temp_dir = ResultType.TMP.path
+
+ key = os.path.join(temp_dir, self.KEY_NAME)
+ pub = os.path.join(temp_dir, self.PUB_NAME)
+
+ return key, pub
+
+ def get_source_key_pair_paths(self): # type: () -> t.Tuple[str, str]
+ """Return the ansible-test SSH key pair paths for the current user."""
+ base_dir = os.path.expanduser('~/.ansible/test/')
+
+ key = os.path.join(base_dir, self.KEY_NAME)
+ pub = os.path.join(base_dir, self.PUB_NAME)
+
+ return key, pub
+
+ def get_key_pair(self): # type: () -> t.Optional[t.Tuple[str, str]]
+ """Return the ansible-test SSH key pair paths if present, otherwise return None."""
+ key, pub = self.get_in_tree_key_pair_paths()
+
+ if os.path.isfile(key) and os.path.isfile(pub):
+ return key, pub
+
+ key, pub = self.get_source_key_pair_paths()
+
+ if os.path.isfile(key) and os.path.isfile(pub):
+ return key, pub
+
+ return None
+
+ def generate_key_pair(self, args): # type: (EnvironmentConfig) -> t.Tuple[str, str]
+ """Generate an SSH key pair for use by all ansible-test invocations for the current user."""
+ key, pub = self.get_source_key_pair_paths()
+
+ if not args.explain:
+ make_dirs(os.path.dirname(key))
+
+ if not os.path.isfile(key) or not os.path.isfile(pub):
+ run_command(args, ['ssh-keygen', '-m', 'PEM', '-q', '-t', 'rsa', '-N', '', '-f', key])
+
+ # newer ssh-keygen PEM output (such as on RHEL 8.1) is not recognized by paramiko
+ key_contents = read_text_file(key)
+ key_contents = re.sub(r'(BEGIN|END) PRIVATE KEY', r'\1 RSA PRIVATE KEY', key_contents)
+
+ write_text_file(key, key_contents)
+
+ return key, pub
+
+
+class InstanceConnection:
+ """Container for remote instance status and connection details."""
+ def __init__(self,
+ running, # type: bool
+ hostname=None, # type: t.Optional[str]
+ port=None, # type: t.Optional[int]
+ username=None, # type: t.Optional[str]
+ password=None, # type: t.Optional[str]
+ response_json=None, # type: t.Optional[t.Dict[str, t.Any]]
+ ): # type: (...) -> None
+ self.running = running
+ self.hostname = hostname
+ self.port = port
+ self.username = username
+ self.password = password
+ self.response_json = response_json or {}
+
+ def __str__(self):
+ if self.password:
+ return '%s:%s [%s:%s]' % (self.hostname, self.port, self.username, self.password)
+
+ return '%s:%s [%s]' % (self.hostname, self.port, self.username)
diff --git a/test/lib/ansible_test/_internal/coverage/__init__.py b/test/lib/ansible_test/_internal/coverage/__init__.py
new file mode 100644
index 00000000..ebb1ca22
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/__init__.py
@@ -0,0 +1,325 @@
+"""Common logic for the coverage subcommand."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..io import (
+ open_binary_file,
+ read_json_file,
+)
+
+from ..util import (
+ ApplicationError,
+ common_environment,
+ display,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from ..util_common import (
+ intercept_command,
+ ResultType,
+)
+
+from ..config import (
+ EnvironmentConfig,
+)
+
+from ..executor import (
+ Delegate,
+ install_command_requirements,
+)
+
+from ..target import (
+ walk_module_targets,
+)
+
+from ..data import (
+ data_context,
+)
+
+if t.TYPE_CHECKING:
+ import coverage as coverage_module
+
+COVERAGE_GROUPS = ('command', 'target', 'environment', 'version')
+COVERAGE_CONFIG_PATH = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'coveragerc')
+COVERAGE_OUTPUT_FILE_NAME = 'coverage'
+
+
+class CoverageConfig(EnvironmentConfig):
+ """Configuration for the coverage command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageConfig, self).__init__(args, 'coverage')
+
+ self.group_by = frozenset(args.group_by) if 'group_by' in args and args.group_by else set() # type: t.FrozenSet[str]
+ self.all = args.all if 'all' in args else False # type: bool
+ self.stub = args.stub if 'stub' in args else False # type: bool
+ self.export = args.export if 'export' in args else None # type: str
+ self.coverage = False # temporary work-around to support intercept_command in cover.py
+
+
+def initialize_coverage(args): # type: (CoverageConfig) -> coverage_module
+ """Delegate execution if requested, install requirements, then import and return the coverage module. Raises an exception if coverage is not available."""
+ if args.delegate:
+ raise Delegate()
+
+ if args.requirements:
+ install_command_requirements(args)
+
+ try:
+ import coverage
+ except ImportError:
+ coverage = None
+
+ if not coverage:
+ raise ApplicationError('You must install the "coverage" python module to use this command.')
+
+ coverage_version_string = coverage.__version__
+ coverage_version = tuple(int(v) for v in coverage_version_string.split('.'))
+
+ min_version = (4, 2)
+ max_version = (5, 0)
+
+ supported_version = True
+ recommended_version = '4.5.4'
+
+ if coverage_version < min_version or coverage_version >= max_version:
+ supported_version = False
+
+ if not supported_version:
+ raise ApplicationError('Version %s of "coverage" is not supported. Version %s is known to work and is recommended.' % (
+ coverage_version_string, recommended_version))
+
+ return coverage
+
+
+def run_coverage(args, output_file, command, cmd): # type: (CoverageConfig, str, str, t.List[str]) -> None
+ """Run the coverage cli tool with the specified options."""
+ env = common_environment()
+ env.update(dict(COVERAGE_FILE=output_file))
+
+ cmd = ['python', '-m', 'coverage.__main__', command, '--rcfile', COVERAGE_CONFIG_PATH] + cmd
+
+ intercept_command(args, target_name='coverage', env=env, cmd=cmd, disable_coverage=True)
+
+
+def get_python_coverage_files(path=None): # type: (t.Optional[str]) -> t.List[str]
+ """Return the list of Python coverage file paths."""
+ return get_coverage_files('python', path)
+
+
+def get_powershell_coverage_files(path=None): # type: (t.Optional[str]) -> t.List[str]
+ """Return the list of PowerShell coverage file paths."""
+ return get_coverage_files('powershell', path)
+
+
+def get_coverage_files(language, path=None): # type: (str, t.Optional[str]) -> t.List[str]
+ """Return the list of coverage file paths for the given language."""
+ coverage_dir = path or ResultType.COVERAGE.path
+ coverage_files = [os.path.join(coverage_dir, f) for f in os.listdir(coverage_dir)
+ if '=coverage.' in f and '=%s' % language in f]
+
+ return coverage_files
+
+
+def get_collection_path_regexes(): # type: () -> t.Tuple[t.Optional[t.Pattern], t.Optional[t.Pattern]]
+ """Return a pair of regexes used for identifying and manipulating collection paths."""
+ if data_context().content.collection:
+ collection_search_re = re.compile(r'/%s/' % data_context().content.collection.directory)
+ collection_sub_re = re.compile(r'^.*?/%s/' % data_context().content.collection.directory)
+ else:
+ collection_search_re = None
+ collection_sub_re = None
+
+ return collection_search_re, collection_sub_re
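+
+# Illustrative behavior (assuming a hypothetical collection directory of
+# 'ansible_collections/ns/col'): collection_search_re matches any path containing
+# '/ansible_collections/ns/col/', and collection_sub_re.sub('', path) strips everything
+# up to and including that directory, which sanitize_filename() below then resolves
+# back to an absolute path.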
+
+
+def get_python_modules(): # type: () -> t.Dict[str, str]
+ """Return a dictionary of Ansible module names and their paths."""
+ return dict((target.module, target.path) for target in list(walk_module_targets()) if target.path.endswith('.py'))
+
+
+def enumerate_python_arcs(
+ path, # type: str
+ coverage, # type: coverage_module
+ modules, # type: t.Dict[str, str]
+ collection_search_re, # type: t.Optional[t.Pattern]
+ collection_sub_re, # type: t.Optional[t.Pattern]
+): # type: (...) -> t.Generator[t.Tuple[str, t.Set[t.Tuple[int, int]]]]
+ """Enumerate Python code coverage arcs in the given file."""
+ if os.path.getsize(path) == 0:
+ display.warning('Empty coverage file: %s' % path, verbosity=2)
+ return
+
+ original = coverage.CoverageData()
+
+ try:
+ original.read_file(path)
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ with open_binary_file(path) as file:
+ header = file.read(6)
+
+ if header == b'SQLite':
+ display.error('File created by "coverage" 5.0+: %s' % os.path.relpath(path))
+ else:
+ display.error(u'%s' % ex)
+
+ return
+
+ for filename in original.measured_files():
+ arcs = original.arcs(filename)
+
+ if not arcs:
+ # This is most likely due to using an unsupported version of coverage.
+ display.warning('No arcs found for "%s" in coverage file: %s' % (filename, path))
+ continue
+
+ filename = sanitize_filename(filename, modules=modules, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re)
+
+ if not filename:
+ continue
+
+ yield filename, set(arcs)
+
+
+def enumerate_powershell_lines(
+ path, # type: str
+ collection_search_re, # type: t.Optional[t.Pattern]
+ collection_sub_re, # type: t.Optional[t.Pattern]
+): # type: (...) -> t.Generator[t.Tuple[str, t.Dict[int, int]]]
+ """Enumerate PowerShell code coverage lines in the given file."""
+ if os.path.getsize(path) == 0:
+ display.warning('Empty coverage file: %s' % path, verbosity=2)
+ return
+
+ try:
+ coverage_run = read_json_file(path)
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ display.error(u'%s' % ex)
+ return
+
+ for filename, hits in coverage_run.items():
+ filename = sanitize_filename(filename, collection_search_re=collection_search_re, collection_sub_re=collection_sub_re)
+
+ if not filename:
+ continue
+
+ if isinstance(hits, dict) and not hits.get('Line'):
+ # Input data was previously aggregated and thus uses the standard ansible-test output format for PowerShell coverage.
+ # This format differs from the more verbose format of raw coverage data from the remote Windows hosts.
+ hits = dict((int(key), value) for key, value in hits.items())
+
+ yield filename, hits
+ continue
+
+ # PowerShell unpacks arrays if there's only a single entry so this is a defensive check on that
+ if not isinstance(hits, list):
+ hits = [hits]
+
+ hits = dict((hit['Line'], hit['HitCount']) for hit in hits if hit)
+
+ yield filename, hits
+
+
+def sanitize_filename(
+ filename, # type: str
+ modules=None, # type: t.Optional[t.Dict[str, str]]
+ collection_search_re=None, # type: t.Optional[t.Pattern]
+ collection_sub_re=None, # type: t.Optional[t.Pattern]
+): # type: (...) -> t.Optional[str]
+ """Convert the given code coverage path to a local absolute path and return its, or None if the path is not valid."""
+ ansible_path = os.path.abspath('lib/ansible/') + '/'
+ root_path = data_context().content.root + '/'
+ integration_temp_path = os.path.sep + os.path.join(ResultType.TMP.relative_path, 'integration') + os.path.sep
+
+ if modules is None:
+ modules = {}
+
+ if '/ansible_modlib.zip/ansible/' in filename:
+ # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.6 and earlier.
+ new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif collection_search_re and collection_search_re.search(filename):
+ new_name = os.path.abspath(collection_sub_re.sub('', filename))
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search(r'/ansible_[^/]+_payload\.zip/ansible/', filename):
+ # Rewrite the module_utils path from the remote host to match the controller. Ansible 2.7 and later.
+ new_name = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', ansible_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif '/ansible_module_' in filename:
+ # Rewrite the module path from the remote host to match the controller. Ansible 2.6 and earlier.
+ module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)
+ if module_name not in modules:
+ display.warning('Skipping coverage of unknown module: %s' % module_name)
+ return None
+ new_name = os.path.abspath(modules[module_name])
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search(r'/ansible_[^/]+_payload(_[^/]+|\.zip)/__main__\.py$', filename):
+ # Rewrite the module path from the remote host to match the controller. Ansible 2.7 and later.
+ # AnsiballZ versions using zipimporter will match the `.zip` portion of the regex.
+ # AnsiballZ versions not using zipimporter will match the `_[^/]+` portion of the regex.
+ module_name = re.sub(r'^.*/ansible_(?P<module>[^/]+)_payload(_[^/]+|\.zip)/__main__\.py$',
+ '\\g<module>', filename).rstrip('_')
+ if module_name not in modules:
+ display.warning('Skipping coverage of unknown module: %s' % module_name)
+ return None
+ new_name = os.path.abspath(modules[module_name])
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif re.search('^(/.*?)?/root/ansible/', filename):
+ # Rewrite the path of code running on a remote host or in a docker container as root.
+ new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+ elif integration_temp_path in filename:
+ # Rewrite the path of code running from an integration test temporary directory.
+ new_name = re.sub(r'^.*' + re.escape(integration_temp_path) + '[^/]+/', root_path, filename)
+ display.info('%s -> %s' % (filename, new_name), verbosity=3)
+ filename = new_name
+
+ filename = os.path.abspath(filename) # make sure path is absolute (will be relative if previously exported)
+
+ return filename
+
+
+class PathChecker:
+ """Checks code coverage paths to verify they are valid and reports on the findings."""
+ def __init__(self, args, collection_search_re=None): # type: (CoverageConfig, t.Optional[t.Pattern]) -> None
+ self.args = args
+ self.collection_search_re = collection_search_re
+ self.invalid_paths = []
+ self.invalid_path_chars = 0
+
+ def check_path(self, path): # type: (str) -> bool
+ """Return True if the given coverage path is valid, otherwise display a warning and return False."""
+ if os.path.isfile(to_bytes(path)):
+ return True
+
+ if self.collection_search_re and self.collection_search_re.search(path) and os.path.basename(path) == '__init__.py':
+ # the collection loader uses implicit namespace packages, so __init__.py does not need to exist on disk
+ # coverage is still reported for these non-existent files, but warnings are not needed
+ return False
+
+ self.invalid_paths.append(path)
+ self.invalid_path_chars += len(path)
+
+ if self.args.verbosity > 1:
+ display.warning('Invalid coverage path: %s' % path)
+
+ return False
+
+ def report(self): # type: () -> None
+ """Display a warning regarding invalid paths if any were found."""
+ if self.invalid_paths:
+ display.warning('Ignored %d characters from %d invalid coverage path(s).' % (self.invalid_path_chars, len(self.invalid_paths)))
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/__init__.py b/test/lib/ansible_test/_internal/coverage/analyze/__init__.py
new file mode 100644
index 00000000..45770373
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/__init__.py
@@ -0,0 +1,19 @@
+"""Common logic for the `coverage analyze` subcommand."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ... import types as t
+
+from .. import (
+ CoverageConfig,
+)
+
+
+class CoverageAnalyzeConfig(CoverageConfig):
+ """Configuration for the `coverage analyze` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeConfig, self).__init__(args)
+
+ # avoid mixing log messages with file output when using `/dev/stdout` for the output file on commands
+ # this may be worth considering as the default behavior in the future, instead of being dependent on the command or options used
+ self.info_stderr = True
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py
new file mode 100644
index 00000000..8fe571b8
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/__init__.py
@@ -0,0 +1,154 @@
+"""Analyze integration test target code coverage."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .... import types as t
+
+from ....io import (
+ read_json_file,
+ write_json_file,
+)
+
+from ....util import (
+ ApplicationError,
+ display,
+)
+
+from .. import (
+ CoverageAnalyzeConfig,
+)
+
+if t.TYPE_CHECKING:
+ TargetKey = t.TypeVar('TargetKey', int, t.Tuple[int, int])
+ NamedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[str]]]
+ IndexedPoints = t.Dict[str, t.Dict[TargetKey, t.Set[int]]]
+ Arcs = t.Dict[str, t.Dict[t.Tuple[int, int], t.Set[int]]]
+ Lines = t.Dict[str, t.Dict[int, t.Set[int]]]
+ TargetIndexes = t.Dict[str, int]
+ TargetSetIndexes = t.Dict[t.FrozenSet[int], int]
+
+
+class CoverageAnalyzeTargetsConfig(CoverageAnalyzeConfig):
+ """Configuration for the `coverage analyze targets` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsConfig, self).__init__(args)
+
+ self.info_stderr = True
+
+
+def make_report(target_indexes, arcs, lines): # type: (TargetIndexes, Arcs, Lines) -> t.Dict[str, t.Any]
+ """Condense target indexes, arcs and lines into a compact report."""
+ set_indexes = {}
+ arc_refs = dict((path, dict((format_arc(arc), get_target_set_index(indexes, set_indexes)) for arc, indexes in data.items())) for path, data in arcs.items())
+ line_refs = dict((path, dict((line, get_target_set_index(indexes, set_indexes)) for line, indexes in data.items())) for path, data in lines.items())
+
+ report = dict(
+ targets=[name for name, index in sorted(target_indexes.items(), key=lambda kvp: kvp[1])],
+ target_sets=[sorted(data) for data, index in sorted(set_indexes.items(), key=lambda kvp: kvp[1])],
+ arcs=arc_refs,
+ lines=line_refs,
+ )
+
+ return report
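+
+# Illustrative shape of the report once serialized to JSON (hypothetical values, not
+# from the original source):
+# {"targets": ["target_a", "target_b"],  # index -> target name
+#  "target_sets": [[0], [0, 1]],         # index -> sorted target indexes
+#  "arcs": {"x.py": {"1:2": 0}},         # path -> arc string -> target_sets index
+#  "lines": {"x.ps1": {"7": 1}}}         # path -> line number -> target_sets index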
+
+
+def load_report(report): # type: (t.Dict[str, t.Any]) -> t.Tuple[t.List[str], Arcs, Lines]
+ """Extract target indexes, arcs and lines from an existing report."""
+ try:
+ target_indexes = report['targets'] # type: t.List[str]
+ target_sets = report['target_sets'] # type: t.List[t.List[int]]
+ arc_data = report['arcs'] # type: t.Dict[str, t.Dict[str, int]]
+ line_data = report['lines'] # type: t.Dict[str, t.Dict[int, int]]
+ except KeyError as ex:
+ raise ApplicationError('Document is missing key "%s".' % ex.args)
+ except TypeError:
+ raise ApplicationError('Document is type "%s" instead of "dict".' % type(report).__name__)
+
+ arcs = dict((path, dict((parse_arc(arc), set(target_sets[index])) for arc, index in data.items())) for path, data in arc_data.items())
+ lines = dict((path, dict((int(line), set(target_sets[index])) for line, index in data.items())) for path, data in line_data.items())
+
+ return target_indexes, arcs, lines
+
+
+def read_report(path): # type: (str) -> t.Tuple[t.List[str], Arcs, Lines]
+ """Read a JSON report from disk."""
+ try:
+ report = read_json_file(path)
+ except Exception as ex:
+ raise ApplicationError('File "%s" is not valid JSON: %s' % (path, ex))
+
+ try:
+ return load_report(report)
+ except ApplicationError as ex:
+ raise ApplicationError('File "%s" is not an aggregated coverage data file. %s' % (path, ex))
+
+
+def write_report(args, report, path): # type: (CoverageAnalyzeTargetsConfig, t.Dict[str, t.Any], str) -> None
+ """Write a JSON report to disk."""
+ if args.explain:
+ return
+
+ write_json_file(path, report, formatted=False)
+
+ display.info('Generated %d byte report with %d targets covering %d files.' % (
+ os.path.getsize(path), len(report['targets']), len(set(report['arcs'].keys()) | set(report['lines'].keys())),
+ ), verbosity=1)
+
+
+def format_arc(value): # type: (t.Tuple[int, int]) -> str
+ """Format an arc tuple as a string."""
+ return '%d:%d' % value
+
+
+def parse_arc(value): # type: (str) -> t.Tuple[int, int]
+ """Parse an arc string into a tuple."""
+ first, last = tuple(map(int, value.split(':')))
+ return first, last
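+
+# Illustrative round trip (not from the original source): format_arc((12, 15)) yields
+# '12:15' and parse_arc('12:15') returns (12, 15). Exit arcs use negative line numbers,
+# so format_arc((34, -1)) yields '34:-1', which parse_arc also handles.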
+
+
+def get_target_set_index(data, target_set_indexes): # type: (t.Set[int], TargetSetIndexes) -> int
+ """Find or add the target set in the result set and return the target set index."""
+ return target_set_indexes.setdefault(frozenset(data), len(target_set_indexes))
+
+
+def get_target_index(name, target_indexes): # type: (str, TargetIndexes) -> int
+ """Find or add the target in the result set and return the target index."""
+ return target_indexes.setdefault(name, len(target_indexes))
+
+
+def expand_indexes(
+ source_data, # type: IndexedPoints
+ source_index, # type: t.List[str]
+ format_func, # type: t.Callable[[t.Any], str]
+): # type: (...) -> NamedPoints
+ """Expand indexes from the source into target names for easier processing of the data (arcs or lines)."""
+ combined_data = {} # type: t.Dict[str, t.Dict[t.Any, t.Set[str]]]
+
+ for covered_path, covered_points in source_data.items():
+ combined_points = combined_data.setdefault(covered_path, {})
+
+ for covered_point, covered_target_indexes in covered_points.items():
+ combined_point = combined_points.setdefault(format_func(covered_point), set())
+
+ for covered_target_index in covered_target_indexes:
+ combined_point.add(source_index[covered_target_index])
+
+ return combined_data
+
+
+def generate_indexes(target_indexes, data): # type: (TargetIndexes, NamedPoints) -> IndexedPoints
+ """Return an indexed version of the given data (arcs or points)."""
+ results = {} # type: IndexedPoints
+
+ for path, points in data.items():
+ result_points = results[path] = {}
+
+ for point, target_names in points.items():
+ result_point = result_points[point] = set()
+
+ for target_name in target_names:
+ result_point.add(get_target_index(target_name, target_indexes))
+
+ return results
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py
new file mode 100644
index 00000000..35148ff6
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py
@@ -0,0 +1,64 @@
+"""Combine integration test target code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .... import types as t
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ read_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ Arcs,
+ IndexedPoints,
+ Lines,
+ TargetIndexes,
+ )
+
+
+class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets combine` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsCombineConfig, self).__init__(args)
+
+ self.input_files = args.input_file # type: t.List[str]
+ self.output_file = args.output_file # type: str
+
+
+def command_coverage_analyze_targets_combine(args): # type: (CoverageAnalyzeTargetsCombineConfig) -> None
+ """Combine integration test target code coverage reports."""
+ combined_target_indexes = {} # type: TargetIndexes
+ combined_path_arcs = {} # type: Arcs
+ combined_path_lines = {} # type: Lines
+
+ for report_path in args.input_files:
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(report_path)
+
+ merge_indexes(covered_path_arcs, covered_targets, combined_path_arcs, combined_target_indexes)
+ merge_indexes(covered_path_lines, covered_targets, combined_path_lines, combined_target_indexes)
+
+ report = make_report(combined_target_indexes, combined_path_arcs, combined_path_lines)
+
+ write_report(args, report, args.output_file)
+
+
+def merge_indexes(
+ source_data, # type: IndexedPoints
+ source_index, # type: t.List[str]
+ combined_data, # type: IndexedPoints
+ combined_index, # type: TargetIndexes
+): # type: (...) -> None
+ """Merge indexes from the source into the combined data set (arcs or lines)."""
+ for covered_path, covered_points in source_data.items():
+ combined_points = combined_data.setdefault(covered_path, {})
+
+ for covered_point, covered_target_indexes in covered_points.items():
+ combined_point = combined_points.setdefault(covered_point, set())
+
+ for covered_target_index in covered_target_indexes:
+ combined_point.add(get_target_index(source_index[covered_target_index], combined_index))
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py
new file mode 100644
index 00000000..388dd6cb
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/expand.py
@@ -0,0 +1,39 @@
+"""Expand target names in an aggregated coverage file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .... import types as t
+
+from ....io import (
+ SortedSetEncoder,
+ write_json_file,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ expand_indexes,
+ format_arc,
+ read_report,
+)
+
+
+class CoverageAnalyzeTargetsExpandConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets expand` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsExpandConfig, self).__init__(args)
+
+ self.input_file = args.input_file # type: str
+ self.output_file = args.output_file # type: str
+
+
+def command_coverage_analyze_targets_expand(args): # type: (CoverageAnalyzeTargetsExpandConfig) -> None
+ """Expand target names in an aggregated coverage file."""
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)
+
+ report = dict(
+ arcs=expand_indexes(covered_path_arcs, covered_targets, format_arc),
+ lines=expand_indexes(covered_path_lines, covered_targets, str),
+ )
+
+ if not args.explain:
+ write_json_file(args.output_file, report, encoder=SortedSetEncoder)
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py
new file mode 100644
index 00000000..e90fb227
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/filter.py
@@ -0,0 +1,104 @@
+"""Filter an aggregated coverage file, keeping only the specified targets."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from .... import types as t
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ expand_indexes,
+ generate_indexes,
+ make_report,
+ read_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ NamedPoints,
+ TargetIndexes,
+ )
+
+
+class CoverageAnalyzeTargetsFilterConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets filter` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsFilterConfig, self).__init__(args)
+
+ self.input_file = args.input_file # type: str
+ self.output_file = args.output_file # type: str
+ self.include_targets = args.include_targets # type: t.List[str]
+ self.exclude_targets = args.exclude_targets # type: t.List[str]
+ self.include_path = args.include_path # type: t.Optional[str]
+ self.exclude_path = args.exclude_path # type: t.Optional[str]
+
+
+def command_coverage_analyze_targets_filter(args): # type: (CoverageAnalyzeTargetsFilterConfig) -> None
+ """Filter target names in an aggregated coverage file."""
+ covered_targets, covered_path_arcs, covered_path_lines = read_report(args.input_file)
+
+ filtered_path_arcs = expand_indexes(covered_path_arcs, covered_targets, lambda v: v)
+ filtered_path_lines = expand_indexes(covered_path_lines, covered_targets, lambda v: v)
+
+ include_targets = set(args.include_targets) if args.include_targets else None
+ exclude_targets = set(args.exclude_targets) if args.exclude_targets else None
+
+ include_path = re.compile(args.include_path) if args.include_path else None
+ exclude_path = re.compile(args.exclude_path) if args.exclude_path else None
+
+ def path_filter_func(path):
+ if include_path and not re.search(include_path, path):
+ return False
+
+ if exclude_path and re.search(exclude_path, path):
+ return False
+
+ return True
+
+ def target_filter_func(targets):
+ if include_targets:
+ targets &= include_targets
+
+ if exclude_targets:
+ targets -= exclude_targets
+
+ return targets
+
+ filtered_path_arcs = filter_data(filtered_path_arcs, path_filter_func, target_filter_func)
+ filtered_path_lines = filter_data(filtered_path_lines, path_filter_func, target_filter_func)
+
+ target_indexes = {} # type: TargetIndexes
+ indexed_path_arcs = generate_indexes(target_indexes, filtered_path_arcs)
+ indexed_path_lines = generate_indexes(target_indexes, filtered_path_lines)
+
+ report = make_report(target_indexes, indexed_path_arcs, indexed_path_lines)
+
+ write_report(args, report, args.output_file)
+
+
+def filter_data(
+ data, # type: NamedPoints
+ path_filter_func, # type: t.Callable[[str], bool]
+ target_filter_func, # type: t.Callable[[t.Set[str]], t.Set[str]]
+): # type: (...) -> NamedPoints
+ """Filter the data set using the specified filter function."""
+ result = {} # type: NamedPoints
+
+ for src_path, src_points in data.items():
+ if not path_filter_func(src_path):
+ continue
+
+ dst_points = {}
+
+ for src_point, src_targets in src_points.items():
+ dst_targets = target_filter_func(src_targets)
+
+ if dst_targets:
+ dst_points[src_point] = dst_targets
+
+ if dst_points:
+ result[src_path] = dst_points
+
+ return result
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py
new file mode 100644
index 00000000..a14b6f55
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/generate.py
@@ -0,0 +1,146 @@
+"""Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .... import types as t
+
+from ....encoding import (
+ to_text,
+)
+
+from ....data import (
+ data_context,
+)
+
+from ....util_common import (
+ ResultType,
+)
+
+from ... import (
+ enumerate_powershell_lines,
+ enumerate_python_arcs,
+ get_collection_path_regexes,
+ get_powershell_coverage_files,
+ get_python_coverage_files,
+ get_python_modules,
+ initialize_coverage,
+ PathChecker,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ Arcs,
+ Lines,
+ TargetIndexes,
+ )
+
+
+class CoverageAnalyzeTargetsGenerateConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets generate` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsGenerateConfig, self).__init__(args)
+
+ self.input_dir = args.input_dir or ResultType.COVERAGE.path # type: str
+ self.output_file = args.output_file # type: str
+
+
+def command_coverage_analyze_targets_generate(args): # type: (CoverageAnalyzeTargetsGenerateConfig) -> None
+ """Analyze code coverage data to determine which integration test targets provide coverage for each arc or line."""
+ root = data_context().content.root
+ target_indexes = {}
+ arcs = dict((os.path.relpath(path, root), data) for path, data in analyze_python_coverage(args, args.input_dir, target_indexes).items())
+ lines = dict((os.path.relpath(path, root), data) for path, data in analyze_powershell_coverage(args, args.input_dir, target_indexes).items())
+ report = make_report(target_indexes, arcs, lines)
+ write_report(args, report, args.output_file)
+
+
+def analyze_python_coverage(
+ args, # type: CoverageAnalyzeTargetsGenerateConfig
+ path, # type: str
+ target_indexes, # type: TargetIndexes
+): # type: (...) -> Arcs
+ """Analyze Python code coverage."""
+ results = {} # type: Arcs
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+ modules = get_python_modules()
+ python_files = get_python_coverage_files(path)
+ coverage = initialize_coverage(args)
+
+ for python_file in python_files:
+ if not is_integration_coverage_file(python_file):
+ continue
+
+ target_name = get_target_name(python_file)
+ target_index = get_target_index(target_name, target_indexes)
+
+ for filename, covered_arcs in enumerate_python_arcs(python_file, coverage, modules, collection_search_re, collection_sub_re):
+ arcs = results.setdefault(filename, {})
+
+ for covered_arc in covered_arcs:
+ arc = arcs.setdefault(covered_arc, set())
+ arc.add(target_index)
+
+ prune_invalid_filenames(args, results, collection_search_re=collection_search_re)
+
+ return results
+
+
+def analyze_powershell_coverage(
+ args, # type: CoverageAnalyzeTargetsGenerateConfig
+ path, # type: str
+ target_indexes, # type: TargetIndexes
+): # type: (...) -> Lines
+ """Analyze PowerShell code coverage"""
+ results = {} # type: Lines
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+ powershell_files = get_powershell_coverage_files(path)
+
+ for powershell_file in powershell_files:
+ if not is_integration_coverage_file(powershell_file):
+ continue
+
+ target_name = get_target_name(powershell_file)
+ target_index = get_target_index(target_name, target_indexes)
+
+ for filename, hits in enumerate_powershell_lines(powershell_file, collection_search_re, collection_sub_re):
+ lines = results.setdefault(filename, {})
+
+ for covered_line in hits:
+ line = lines.setdefault(covered_line, set())
+ line.add(target_index)
+
+ prune_invalid_filenames(args, results)
+
+ return results
+
+
+def prune_invalid_filenames(
+ args, # type: CoverageAnalyzeTargetsGenerateConfig
+ results, # type: t.Dict[str, t.Any]
+ collection_search_re=None, # type: t.Optional[str]
+): # type: (...) -> None
+ """Remove invalid filenames from the given result set."""
+ path_checker = PathChecker(args, collection_search_re)
+
+ for path in list(results.keys()):
+ if not path_checker.check_path(path):
+ del results[path]
+
+
+def get_target_name(path): # type: (str) -> str
+ """Extract the test target name from the given coverage path."""
+ return to_text(os.path.basename(path).split('=')[1])
+
+
+def is_integration_coverage_file(path): # type: (str) -> bool
+ """Returns True if the coverage file came from integration tests, otherwise False."""
+ return os.path.basename(path).split('=')[0] in ('integration', 'windows-integration', 'network-integration')
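+
+# Illustrative file name (hypothetical): 'integration=ping=docker-centos8=python-3.6=coverage.abc'
+# satisfies both helpers above: the 'integration' prefix marks it as an integration
+# coverage file and 'ping' is extracted as the target name.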
diff --git a/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py b/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py
new file mode 100644
index 00000000..613a0ef2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/analyze/targets/missing.py
@@ -0,0 +1,109 @@
+"""Identify aggregated coverage in one file missing from another."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .... import types as t
+
+from ....encoding import (
+ to_bytes,
+)
+
+from . import (
+ CoverageAnalyzeTargetsConfig,
+ get_target_index,
+ make_report,
+ read_report,
+ write_report,
+)
+
+if t.TYPE_CHECKING:
+ from . import (
+ TargetIndexes,
+ IndexedPoints,
+ )
+
+
+class CoverageAnalyzeTargetsMissingConfig(CoverageAnalyzeTargetsConfig):
+ """Configuration for the `coverage analyze targets missing` command."""
+ def __init__(self, args): # type: (t.Any) -> None
+ super(CoverageAnalyzeTargetsMissingConfig, self).__init__(args)
+
+ self.from_file = args.from_file # type: str
+ self.to_file = args.to_file # type: str
+ self.output_file = args.output_file # type: str
+
+ self.only_gaps = args.only_gaps # type: bool
+ self.only_exists = args.only_exists # type: bool
+
+
+def command_coverage_analyze_targets_missing(args): # type: (CoverageAnalyzeTargetsMissingConfig) -> None
+ """Identify aggregated coverage in one file missing from another."""
+ from_targets, from_path_arcs, from_path_lines = read_report(args.from_file)
+ to_targets, to_path_arcs, to_path_lines = read_report(args.to_file)
+ target_indexes = {}
+
+ if args.only_gaps:
+ arcs = find_gaps(from_path_arcs, from_targets, to_path_arcs, target_indexes, args.only_exists)
+ lines = find_gaps(from_path_lines, from_targets, to_path_lines, target_indexes, args.only_exists)
+ else:
+ arcs = find_missing(from_path_arcs, from_targets, to_path_arcs, to_targets, target_indexes, args.only_exists)
+ lines = find_missing(from_path_lines, from_targets, to_path_lines, to_targets, target_indexes, args.only_exists)
+
+ report = make_report(target_indexes, arcs, lines)
+ write_report(args, report, args.output_file)
+
+
+def find_gaps(
+ from_data, # type: IndexedPoints
+ from_index, # type: t.List[str]
+ to_data, # type: IndexedPoints
+ target_indexes, # type: TargetIndexes
+ only_exists, # type: bool
+): # type: (...) -> IndexedPoints
+ """Find gaps in coverage between the from and to data sets."""
+ target_data = {}
+
+ for from_path, from_points in from_data.items():
+ if only_exists and not os.path.isfile(to_bytes(from_path)):
+ continue
+
+ to_points = to_data.get(from_path, {})
+
+ gaps = set(from_points.keys()) - set(to_points.keys())
+
+ if gaps:
+ gap_points = dict((key, value) for key, value in from_points.items() if key in gaps)
+ target_data[from_path] = dict((gap, set(get_target_index(from_index[i], target_indexes) for i in indexes)) for gap, indexes in gap_points.items())
+
+ return target_data
+
+
+def find_missing(
+ from_data, # type: IndexedPoints
+ from_index, # type: t.List[str]
+ to_data, # type: IndexedPoints
+ to_index, # type: t.List[str]
+ target_indexes, # type: TargetIndexes
+ only_exists, # type: bool
+): # type: (...) -> IndexedPoints
+ """Find coverage in from_data not present in to_data (arcs or lines)."""
+ target_data = {}
+
+ for from_path, from_points in from_data.items():
+ if only_exists and not os.path.isfile(to_bytes(from_path)):
+ continue
+
+ to_points = to_data.get(from_path, {})
+
+ for from_point, from_target_indexes in from_points.items():
+ to_target_indexes = to_points.get(from_point, set())
+
+ remaining_targets = set(from_index[i] for i in from_target_indexes) - set(to_index[i] for i in to_target_indexes)
+
+ if remaining_targets:
+ target_index = target_data.setdefault(from_path, {}).setdefault(from_point, set())
+ target_index.update(get_target_index(name, target_indexes) for name in remaining_targets)
+
+ return target_data
diff --git a/test/lib/ansible_test/_internal/coverage/combine.py b/test/lib/ansible_test/_internal/coverage/combine.py
new file mode 100644
index 00000000..7f726267
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/combine.py
@@ -0,0 +1,303 @@
+"""Combine code coverage files."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..target import (
+ walk_compile_targets,
+ walk_powershell_targets,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ display,
+)
+
+from ..util_common import (
+ ResultType,
+ write_json_file,
+ write_json_test_results,
+)
+
+from . import (
+ enumerate_python_arcs,
+ enumerate_powershell_lines,
+ get_collection_path_regexes,
+ get_python_coverage_files,
+ get_python_modules,
+ get_powershell_coverage_files,
+ initialize_coverage,
+ COVERAGE_OUTPUT_FILE_NAME,
+ COVERAGE_GROUPS,
+ CoverageConfig,
+ PathChecker,
+)
+
+
+def command_coverage_combine(args):
+ """Patch paths in coverage files and merge into a single file.
+ :type args: CoverageConfig
+ :rtype: list[str]
+ """
+ paths = _command_coverage_combine_powershell(args) + _command_coverage_combine_python(args)
+
+ for path in paths:
+ display.info('Generated combined output: %s' % path, verbosity=1)
+
+ return paths
+
+
+def _command_coverage_combine_python(args):
+ """
+ :type args: CoverageConfig
+ :rtype: list[str]
+ """
+ coverage = initialize_coverage(args)
+
+ modules = get_python_modules()
+
+ coverage_files = get_python_coverage_files()
+
+ counter = 0
+ sources = _get_coverage_targets(args, walk_compile_targets)
+ groups = _build_stub_groups(args, sources, lambda line_count: set())
+
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+
+ for coverage_file in coverage_files:
+ counter += 1
+ display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
+
+ group = get_coverage_group(args, coverage_file)
+
+ if group is None:
+ display.warning('Unexpected name for coverage file: %s' % coverage_file)
+ continue
+
+ for filename, arcs in enumerate_python_arcs(coverage_file, coverage, modules, collection_search_re, collection_sub_re):
+ if args.export:
+ filename = os.path.relpath(filename) # exported paths must be relative since absolute paths may differ between systems
+
+ if group not in groups:
+ groups[group] = {}
+
+ arc_data = groups[group]
+
+ if filename not in arc_data:
+ arc_data[filename] = set()
+
+ arc_data[filename].update(arcs)
+
+ output_files = []
+
+ if args.export:
+ coverage_file = os.path.join(args.export, '')
+ suffix = '=coverage.combined'
+ else:
+ coverage_file = os.path.join(ResultType.COVERAGE.path, COVERAGE_OUTPUT_FILE_NAME)
+ suffix = ''
+
+ path_checker = PathChecker(args, collection_search_re)
+
+ for group in sorted(groups):
+ arc_data = groups[group]
+
+ updated = coverage.CoverageData()
+
+ for filename in arc_data:
+ if not path_checker.check_path(filename):
+ continue
+
+ updated.add_arcs({filename: list(arc_data[filename])})
+
+ if args.all:
+ updated.add_arcs(dict((source[0], []) for source in sources))
+
+ if not args.explain:
+ output_file = coverage_file + group + suffix
+ updated.write_file(output_file) # always write files to make sure stale files do not exist
+
+ if updated:
+ # only report files which are non-empty to prevent coverage from reporting errors
+ output_files.append(output_file)
+
+ path_checker.report()
+
+ return sorted(output_files)
+
+
+def _command_coverage_combine_powershell(args):
+ """
+ :type args: CoverageConfig
+ :rtype: list[str]
+ """
+ coverage_files = get_powershell_coverage_files()
+
+ def _default_stub_value(lines):
+ val = {}
+ for line in range(lines):
+ val[line] = 0
+ return val
+
+ counter = 0
+ sources = _get_coverage_targets(args, walk_powershell_targets)
+ groups = _build_stub_groups(args, sources, _default_stub_value)
+
+ collection_search_re, collection_sub_re = get_collection_path_regexes()
+
+ for coverage_file in coverage_files:
+ counter += 1
+ display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
+
+ group = get_coverage_group(args, coverage_file)
+
+ if group is None:
+ display.warning('Unexpected name for coverage file: %s' % coverage_file)
+ continue
+
+ for filename, hits in enumerate_powershell_lines(coverage_file, collection_search_re, collection_sub_re):
+ if args.export:
+ filename = os.path.relpath(filename) # exported paths must be relative since absolute paths may differ between systems
+
+ if group not in groups:
+ groups[group] = {}
+
+ coverage_data = groups[group]
+
+ if filename not in coverage_data:
+ coverage_data[filename] = {}
+
+ file_coverage = coverage_data[filename]
+
+ for line_no, hit_count in hits.items():
+ file_coverage[line_no] = file_coverage.get(line_no, 0) + hit_count
+
+ output_files = []
+
+ path_checker = PathChecker(args)
+
+ for group in sorted(groups):
+ coverage_data = dict((filename, data) for filename, data in groups[group].items() if path_checker.check_path(filename))
+
+ if args.all:
+ # Add 0 line entries for files not in coverage_data
+ for source, source_line_count in sources:
+ if source in coverage_data:
+ continue
+
+ coverage_data[source] = _default_stub_value(source_line_count)
+
+ if not args.explain:
+ if args.export:
+ output_file = os.path.join(args.export, group + '=coverage.combined')
+ write_json_file(output_file, coverage_data, formatted=False)
+ output_files.append(output_file)
+ continue
+
+ output_file = COVERAGE_OUTPUT_FILE_NAME + group + '-powershell'
+
+ write_json_test_results(ResultType.COVERAGE, output_file, coverage_data, formatted=False)
+
+ output_files.append(os.path.join(ResultType.COVERAGE.path, output_file))
+
+ path_checker.report()
+
+ return sorted(output_files)
+
+
+def _get_coverage_targets(args, walk_func):
+ """
+ :type args: CoverageConfig
+ :type walk_func: Func
+ :rtype: list[tuple[str, int]]
+ """
+ sources = []
+
+ if args.all or args.stub:
+ # excludes symlinks of regular files to avoid reporting on the same file multiple times
+ # in the future it would be nice to merge any coverage for symlinks into the real files
+ for target in walk_func(include_symlinks=False):
+ target_path = os.path.abspath(target.path)
+
+ target_lines = len(read_text_file(target_path).splitlines())
+
+ sources.append((target_path, target_lines))
+
+ sources.sort()
+
+ return sources
+
+
+def _build_stub_groups(args, sources, default_stub_value):
+ """
+ :type args: CoverageConfig
+ :type sources: List[tuple[str, int]]
+ :type default_stub_value: Func[int]
+ :rtype: dict
+ """
+ groups = {}
+
+ if args.stub:
+ stub_group = []
+ stub_groups = [stub_group]
+ stub_line_limit = 500000
+ stub_line_count = 0
+
+ for source, source_line_count in sources:
+ stub_group.append((source, source_line_count))
+ stub_line_count += source_line_count
+
+ if stub_line_count > stub_line_limit:
+ stub_line_count = 0
+ stub_group = []
+ stub_groups.append(stub_group)
+
+ for stub_index, stub_group in enumerate(stub_groups):
+ if not stub_group:
+ continue
+
+ groups['=stub-%02d' % (stub_index + 1)] = dict((source, default_stub_value(line_count))
+ for source, line_count in stub_group)
+
+ return groups
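+
+# Illustrative grouping (hypothetical sources, not from the original source): with the
+# 500,000 line limit above, three files of 300,000 lines each yield groups '=stub-01'
+# (the first two files) and '=stub-02' (the third), each mapping
+# source path -> default_stub_value(line_count).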
+
+
+def get_coverage_group(args, coverage_file):
+ """
+ :type args: CoverageConfig
+ :type coverage_file: str
+ :rtype: str
+ """
+ parts = os.path.basename(coverage_file).split('=', 4)
+
+ # noinspection PyTypeChecker
+ if len(parts) != 5 or not parts[4].startswith('coverage.'):
+ return None
+
+ names = dict(
+ command=parts[0],
+ target=parts[1],
+ environment=parts[2],
+ version=parts[3],
+ )
+
+ export_names = dict(
+ version=parts[3],
+ )
+
+ group = ''
+
+ for part in COVERAGE_GROUPS:
+ if part in args.group_by:
+ group += '=%s' % names[part]
+ elif args.export:
+ group += '=%s' % export_names.get(part, 'various')
+
+ if args.export:
+ group = group.lstrip('=')
+
+ return group
diff --git a/test/lib/ansible_test/_internal/coverage/erase.py b/test/lib/ansible_test/_internal/coverage/erase.py
new file mode 100644
index 00000000..92d241c7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/erase.py
@@ -0,0 +1,27 @@
+"""Erase code coverage files."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..util_common import (
+ ResultType,
+)
+
+from . import (
+ CoverageConfig,
+)
+
+
+def command_coverage_erase(args): # type: (CoverageConfig) -> None
+ """Erase code coverage data files collected during test runs."""
+ coverage_dir = ResultType.COVERAGE.path
+
+ for name in os.listdir(coverage_dir):
+ if not name.startswith('coverage') and '=coverage.' not in name:
+ continue
+
+ path = os.path.join(coverage_dir, name)
+
+ if not args.explain:
+ os.remove(path)
diff --git a/test/lib/ansible_test/_internal/coverage/html.py b/test/lib/ansible_test/_internal/coverage/html.py
new file mode 100644
index 00000000..63956a19
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/html.py
@@ -0,0 +1,45 @@
+"""Generate HTML code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..io import (
+ make_dirs,
+)
+
+from ..util import (
+ display,
+)
+
+from ..util_common import (
+ ResultType,
+)
+
+from .combine import (
+ command_coverage_combine,
+)
+
+from . import (
+ run_coverage,
+ CoverageConfig,
+)
+
+
+def command_coverage_html(args):
+ """
+ :type args: CoverageConfig
+ """
+ output_files = command_coverage_combine(args)
+
+ for output_file in output_files:
+ if output_file.endswith('-powershell'):
+            # coverage.py does not support non-Python files, so we just skip the local HTML report.
+ display.info("Skipping output file %s in html generation" % output_file, verbosity=3)
+ continue
+
+ dir_name = os.path.join(ResultType.REPORTS.path, os.path.basename(output_file))
+ make_dirs(dir_name)
+ run_coverage(args, output_file, 'html', ['-i', '-d', dir_name])
+
+ display.info('HTML report generated: file:///%s' % os.path.join(dir_name, 'index.html'))
diff --git a/test/lib/ansible_test/_internal/coverage/report.py b/test/lib/ansible_test/_internal/coverage/report.py
new file mode 100644
index 00000000..24efa637
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/report.py
@@ -0,0 +1,156 @@
+"""Generate console code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..io import (
+ read_json_file,
+)
+
+from ..util import (
+ display,
+)
+
+from ..data import (
+ data_context,
+)
+
+from .combine import (
+ command_coverage_combine,
+)
+
+from . import (
+ run_coverage,
+ CoverageConfig,
+)
+
+
+def command_coverage_report(args):
+ """
+ :type args: CoverageReportConfig
+ """
+ output_files = command_coverage_combine(args)
+
+ for output_file in output_files:
+ if args.group_by or args.stub:
+ display.info('>>> Coverage Group: %s' % ' '.join(os.path.basename(output_file).split('=')[1:]))
+
+ if output_file.endswith('-powershell'):
+ display.info(_generate_powershell_output_report(args, output_file))
+ else:
+ options = []
+
+ if args.show_missing:
+ options.append('--show-missing')
+
+ if args.include:
+ options.extend(['--include', args.include])
+
+ if args.omit:
+ options.extend(['--omit', args.omit])
+
+ run_coverage(args, output_file, 'report', options)
+
+
+def _generate_powershell_output_report(args, coverage_file):
+ """
+ :type args: CoverageReportConfig
+ :type coverage_file: str
+ :rtype: str
+ """
+ coverage_info = read_json_file(coverage_file)
+
+ root_path = data_context().content.root + '/'
+
+ name_padding = 7
+ cover_padding = 8
+
+ file_report = []
+ total_stmts = 0
+ total_miss = 0
+
+ for filename in sorted(coverage_info.keys()):
+ hit_info = coverage_info[filename]
+
+ if filename.startswith(root_path):
+ filename = filename[len(root_path):]
+
+ if args.omit and filename in args.omit:
+ continue
+ if args.include and filename not in args.include:
+ continue
+
+ stmts = len(hit_info)
+ miss = len([c for c in hit_info.values() if c == 0])
+
+ name_padding = max(name_padding, len(filename) + 3)
+
+ total_stmts += stmts
+ total_miss += miss
+
+ cover = "{0}%".format(int((stmts - miss) / stmts * 100))
+
+ missing = []
+ current_missing = None
+ sorted_lines = sorted([int(x) for x in hit_info.keys()])
+ for idx, line in enumerate(sorted_lines):
+ hit = hit_info[str(line)]
+ if hit == 0 and current_missing is None:
+ current_missing = line
+ elif hit != 0 and current_missing is not None:
+ end_line = sorted_lines[idx - 1]
+ if current_missing == end_line:
+ missing.append(str(current_missing))
+ else:
+ missing.append('%s-%s' % (current_missing, end_line))
+ current_missing = None
+
+ if current_missing is not None:
+ end_line = sorted_lines[-1]
+ if current_missing == end_line:
+ missing.append(str(current_missing))
+ else:
+ missing.append('%s-%s' % (current_missing, end_line))
+
+ file_report.append({'name': filename, 'stmts': stmts, 'miss': miss, 'cover': cover, 'missing': missing})
+
+ if total_stmts == 0:
+ return ''
+
+ total_percent = '{0}%'.format(int((total_stmts - total_miss) / total_stmts * 100))
+ stmts_padding = max(8, len(str(total_stmts)))
+ miss_padding = max(7, len(str(total_miss)))
+
+ line_length = name_padding + stmts_padding + miss_padding + cover_padding
+
+ header = 'Name'.ljust(name_padding) + 'Stmts'.rjust(stmts_padding) + 'Miss'.rjust(miss_padding) + \
+ 'Cover'.rjust(cover_padding)
+
+ if args.show_missing:
+ header += 'Lines Missing'.rjust(16)
+ line_length += 16
+
+ line_break = '-' * line_length
+ lines = ['%s%s%s%s%s' % (f['name'].ljust(name_padding), str(f['stmts']).rjust(stmts_padding),
+ str(f['miss']).rjust(miss_padding), f['cover'].rjust(cover_padding),
+ ' ' + ', '.join(f['missing']) if args.show_missing else '')
+ for f in file_report]
+ totals = 'TOTAL'.ljust(name_padding) + str(total_stmts).rjust(stmts_padding) + \
+ str(total_miss).rjust(miss_padding) + total_percent.rjust(cover_padding)
+
+ report = '{0}\n{1}\n{2}\n{1}\n{3}'.format(header, line_break, "\n".join(lines), totals)
+ return report
+
+
+class CoverageReportConfig(CoverageConfig):
+ """Configuration for the coverage report command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(CoverageReportConfig, self).__init__(args)
+
+ self.show_missing = args.show_missing # type: bool
+ self.include = args.include # type: str
+ self.omit = args.omit # type: str
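
A standalone sketch of the missing-line compression used by
_generate_powershell_output_report() above: consecutive uncovered lines collapse
into 'start-end' ranges while isolated lines stay as single numbers (the hit_info
sample is hypothetical):

    hit_info = {'1': 1, '2': 0, '3': 0, '4': 1, '5': 0}  # line number -> hit count

    missing = []
    current_missing = None
    sorted_lines = sorted(int(x) for x in hit_info)

    for idx, line in enumerate(sorted_lines):
        hit = hit_info[str(line)]
        if hit == 0 and current_missing is None:
            current_missing = line
        elif hit != 0 and current_missing is not None:
            end_line = sorted_lines[idx - 1]
            missing.append(str(current_missing) if current_missing == end_line else '%s-%s' % (current_missing, end_line))
            current_missing = None

    if current_missing is not None:
        end_line = sorted_lines[-1]
        missing.append(str(current_missing) if current_missing == end_line else '%s-%s' % (current_missing, end_line))

    print(', '.join(missing))  # 2-3, 5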
diff --git a/test/lib/ansible_test/_internal/coverage/xml.py b/test/lib/ansible_test/_internal/coverage/xml.py
new file mode 100644
index 00000000..94b5abc5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage/xml.py
@@ -0,0 +1,191 @@
+"""Generate XML code coverage reports."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+
+from xml.etree.ElementTree import (
+ Comment,
+ Element,
+ SubElement,
+ tostring,
+)
+
+from xml.dom import (
+ minidom,
+)
+
+from ..io import (
+ make_dirs,
+ read_json_file,
+)
+
+from ..util_common import (
+ ResultType,
+ write_text_test_results,
+)
+
+from ..env import (
+ get_ansible_version,
+)
+
+from ..data import (
+ data_context,
+)
+
+from .combine import (
+ command_coverage_combine,
+)
+
+from . import (
+ run_coverage,
+ CoverageConfig,
+)
+
+
+def command_coverage_xml(args):
+ """
+ :type args: CoverageConfig
+ """
+ output_files = command_coverage_combine(args)
+
+ for output_file in output_files:
+ xml_name = '%s.xml' % os.path.basename(output_file)
+ if output_file.endswith('-powershell'):
+ report = _generate_powershell_xml(output_file)
+
+ rough_string = tostring(report, 'utf-8')
+ reparsed = minidom.parseString(rough_string)
+ pretty = reparsed.toprettyxml(indent=' ')
+
+ write_text_test_results(ResultType.REPORTS, xml_name, pretty)
+ else:
+ xml_path = os.path.join(ResultType.REPORTS.path, xml_name)
+ make_dirs(ResultType.REPORTS.path)
+ run_coverage(args, output_file, 'xml', ['-i', '-o', xml_path])
+
+
+def _generate_powershell_xml(coverage_file):
+ """
+ :type coverage_file: str
+ :rtype: Element
+ """
+ coverage_info = read_json_file(coverage_file)
+
+ content_root = data_context().content.root
+ is_ansible = data_context().content.is_ansible
+
+ packages = {}
+ for path, results in coverage_info.items():
+ filename = os.path.splitext(os.path.basename(path))[0]
+
+ if filename.startswith('Ansible.ModuleUtils'):
+ package = 'ansible.module_utils'
+ elif is_ansible:
+ package = 'ansible.modules'
+ else:
+ rel_path = path[len(content_root) + 1:]
+ plugin_type = "modules" if rel_path.startswith("plugins/modules") else "module_utils"
+ package = 'ansible_collections.%splugins.%s' % (data_context().content.collection.prefix, plugin_type)
+
+ if package not in packages:
+ packages[package] = {}
+
+ packages[package][path] = results
+
+ elem_coverage = Element('coverage')
+ elem_coverage.append(
+ Comment(' Generated by ansible-test from the Ansible project: https://www.ansible.com/ '))
+ elem_coverage.append(
+ Comment(' Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd '))
+
+ elem_sources = SubElement(elem_coverage, 'sources')
+
+ elem_source = SubElement(elem_sources, 'source')
+ elem_source.text = data_context().content.root
+
+ elem_packages = SubElement(elem_coverage, 'packages')
+
+ total_lines_hit = 0
+ total_line_count = 0
+
+ for package_name, package_data in packages.items():
+ lines_hit, line_count = _add_cobertura_package(elem_packages, package_name, package_data)
+
+ total_lines_hit += lines_hit
+ total_line_count += line_count
+
+ elem_coverage.attrib.update({
+ 'branch-rate': '0',
+ 'branches-covered': '0',
+ 'branches-valid': '0',
+ 'complexity': '0',
+ 'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
+        'lines-covered': str(total_lines_hit),
+        'lines-valid': str(total_line_count),
+ 'timestamp': str(int(time.time())),
+ 'version': get_ansible_version(),
+ })
+
+ return elem_coverage
+
+
+def _add_cobertura_package(packages, package_name, package_data):
+ """
+    :type packages: Element
+ :type package_name: str
+ :type package_data: Dict[str, Dict[str, int]]
+ :rtype: Tuple[int, int]
+ """
+ elem_package = SubElement(packages, 'package')
+ elem_classes = SubElement(elem_package, 'classes')
+
+ total_lines_hit = 0
+ total_line_count = 0
+
+ for path, results in package_data.items():
+ lines_hit = len([True for hits in results.values() if hits])
+ line_count = len(results)
+
+ total_lines_hit += lines_hit
+ total_line_count += line_count
+
+ elem_class = SubElement(elem_classes, 'class')
+
+ class_name = os.path.splitext(os.path.basename(path))[0]
+ if class_name.startswith("Ansible.ModuleUtils"):
+ class_name = class_name[20:]
+
+ content_root = data_context().content.root
+ filename = path
+ if filename.startswith(content_root):
+ filename = filename[len(content_root) + 1:]
+
+ elem_class.attrib.update({
+ 'branch-rate': '0',
+ 'complexity': '0',
+ 'filename': filename,
+ 'line-rate': str(round(lines_hit / line_count, 4)) if line_count else "0",
+ 'name': class_name,
+ })
+
+ SubElement(elem_class, 'methods')
+
+ elem_lines = SubElement(elem_class, 'lines')
+
+ for number, hits in results.items():
+ elem_line = SubElement(elem_lines, 'line')
+ elem_line.attrib.update(
+ hits=str(hits),
+ number=str(number),
+ )
+
+ elem_package.attrib.update({
+ 'branch-rate': '0',
+ 'complexity': '0',
+ 'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
+ 'name': package_name,
+ })
+
+ return total_lines_hit, total_line_count
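
A short sketch of the per-file arithmetic in _add_cobertura_package() above,
building one Cobertura class element from a hypothetical PowerShell coverage entry
(the filename and class name are illustrative):

    from xml.etree.ElementTree import Element, SubElement, tostring

    results = {'1': 3, '2': 0, '5': 1}  # line number -> hit count

    lines_hit = len([True for hits in results.values() if hits])  # 2
    line_count = len(results)                                     # 3

    elem_class = Element('class')
    elem_class.attrib.update({
        'filename': 'plugins/modules/sample.ps1',
        'line-rate': str(round(lines_hit / line_count, 4)),  # '0.6667'
        'name': 'sample',
    })

    elem_lines = SubElement(elem_class, 'lines')

    for number, hits in results.items():
        SubElement(elem_lines, 'line', hits=str(hits), number=str(number))

    print(tostring(elem_class).decode())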
diff --git a/test/lib/ansible_test/_internal/coverage_util.py b/test/lib/ansible_test/_internal/coverage_util.py
new file mode 100644
index 00000000..e5434231
--- /dev/null
+++ b/test/lib/ansible_test/_internal/coverage_util.py
@@ -0,0 +1,125 @@
+"""Utility code for facilitating collection of code coverage when running tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import contextlib
+import os
+import tempfile
+
+from .config import (
+ IntegrationConfig,
+ SanityConfig,
+ TestConfig,
+)
+
+from .io import (
+ write_text_file,
+)
+
+from .util import (
+ COVERAGE_CONFIG_NAME,
+ remove_tree,
+)
+
+from .data import (
+ data_context,
+)
+
+
+@contextlib.contextmanager
+def coverage_context(args): # type: (TestConfig) -> None
+    """Context manager to set up and clean up code coverage configuration for tests."""
+ coverage_setup(args)
+
+ try:
+ yield
+ finally:
+ coverage_cleanup(args)
+
+
+def coverage_setup(args): # type: (TestConfig) -> None
+ """Set up code coverage configuration before running tests."""
+ if not args.coverage:
+ return
+
+ coverage_config = generate_coverage_config(args)
+
+ if args.explain:
+ args.coverage_config_base_path = '/tmp/coverage-temp-dir'
+ else:
+ args.coverage_config_base_path = tempfile.mkdtemp()
+
+ write_text_file(os.path.join(args.coverage_config_base_path, COVERAGE_CONFIG_NAME), coverage_config)
+
+
+def coverage_cleanup(args): # type: (TestConfig) -> None
+ """Clean up code coverage configuration after tests have finished."""
+ if args.coverage_config_base_path and not args.explain:
+ remove_tree(args.coverage_config_base_path)
+ args.coverage_config_base_path = None
+
+
+def generate_coverage_config(args): # type: (TestConfig) -> str
+ """Generate code coverage configuration for tests."""
+ if data_context().content.collection:
+ coverage_config = generate_collection_coverage_config(args)
+ else:
+ coverage_config = generate_ansible_coverage_config()
+
+ return coverage_config
+
+
+def generate_ansible_coverage_config(): # type: () -> str
+ """Generate code coverage configuration for Ansible tests."""
+ coverage_config = '''
+[run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+
+omit =
+ */python*/dist-packages/*
+ */python*/site-packages/*
+ */python*/distutils/*
+ */pyshared/*
+ */pytest
+ */AnsiballZ_*.py
+ */test/results/*
+'''
+
+ return coverage_config
+
+
+def generate_collection_coverage_config(args): # type: (TestConfig) -> str
+ """Generate code coverage configuration for Ansible Collection tests."""
+ coverage_config = '''
+[run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+disable_warnings =
+ no-data-collected
+'''
+
+ if isinstance(args, IntegrationConfig):
+ coverage_config += '''
+include =
+ %s/*
+ */%s/*
+''' % (data_context().content.root, data_context().content.collection.directory)
+ elif isinstance(args, SanityConfig):
+ # temporary work-around for import sanity test
+ coverage_config += '''
+include =
+ %s/*
+
+omit =
+ %s/*
+''' % (data_context().content.root, os.path.join(data_context().content.root, data_context().content.results_path))
+ else:
+ coverage_config += '''
+include =
+ %s/*
+''' % data_context().content.root
+
+ return coverage_config
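
The coverage_context() helper above follows the standard contextlib pattern: setup
runs on entry and cleanup always runs on exit, even when the tests raise. A minimal
standalone sketch of the same idiom (the names here are illustrative stand-ins for
coverage_setup() and coverage_cleanup()):

    import contextlib

    @contextlib.contextmanager
    def example_context(label):
        print('setup: %s' % label)        # stands in for coverage_setup(args)
        try:
            yield
        finally:
            print('cleanup: %s' % label)  # stands in for coverage_cleanup(args)

    with example_context('units'):
        print('running tests')  # cleanup still runs if this raises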
diff --git a/test/lib/ansible_test/_internal/csharp_import_analysis.py b/test/lib/ansible_test/_internal/csharp_import_analysis.py
new file mode 100644
index 00000000..daa8892c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/csharp_import_analysis.py
@@ -0,0 +1,106 @@
+"""Analyze C# import statements."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from .io import (
+ open_text_file,
+)
+
+from .util import (
+ display,
+)
+
+from .util_common import (
+ resolve_csharp_ps_util,
+)
+
+from .data import (
+ data_context,
+)
+
+
+def get_csharp_module_utils_imports(powershell_targets, csharp_targets):
+    """Return a dictionary of module_utils names mapped to sets of file paths which import them.
+    :type powershell_targets: list[TestTarget] - PowerShell files
+    :type csharp_targets: list[TestTarget] - C# files
+ :rtype: dict[str, set[str]]
+ """
+
+ module_utils = enumerate_module_utils()
+
+ imports_by_target_path = {}
+
+ for target in powershell_targets:
+ imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, False)
+
+ for target in csharp_targets:
+ imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, True)
+
+ imports = dict([(module_util, set()) for module_util in module_utils])
+
+ for target_path in imports_by_target_path:
+ for module_util in imports_by_target_path[target_path]:
+ imports[module_util].add(target_path)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_csharp_module_utils_name(path): # type: (str) -> str
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_csharp_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
+ else:
+ prefix = ''
+
+ name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils():
+ """Return a list of available module_utils imports.
+ :rtype: set[str]
+ """
+ return set(get_csharp_module_utils_name(p)
+ for p in data_context().content.walk_files(data_context().content.module_utils_csharp_path)
+ if os.path.splitext(p)[1] == '.cs')
+
+
+def extract_csharp_module_utils_imports(path, module_utils, is_pure_csharp):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ :type is_pure_csharp: bool
+ :rtype: set[str]
+ """
+ imports = set()
+ if is_pure_csharp:
+ pattern = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$')
+ else:
+ pattern = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible.collections|\.)\..+)')
+
+ with open_text_file(path) as module_file:
+ for line_number, line in enumerate(module_file, 1):
+ match = re.search(pattern, line)
+
+ if not match:
+ continue
+
+ import_name = resolve_csharp_ps_util(match.group(1), path)
+
+ if import_name in module_utils:
+ imports.add(import_name)
+ elif data_context().content.is_ansible or \
+ import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
+ display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
+
+ return imports
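
The two patterns in extract_csharp_module_utils_imports() above, applied to sample
source lines (the module_utils names are illustrative):

    import re

    pure_csharp = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$')
    powershell = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible.collections|\.)\..+)')

    print(pure_csharp.search('using Ansible.Become;').group(1))
    # Ansible.Become
    print(powershell.search('#AnsibleRequires -CSharpUtil Ansible.Basic').group(1))
    # Ansible.Basic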
diff --git a/test/lib/ansible_test/_internal/data.py b/test/lib/ansible_test/_internal/data.py
new file mode 100644
index 00000000..38ae6d21
--- /dev/null
+++ b/test/lib/ansible_test/_internal/data.py
@@ -0,0 +1,200 @@
+"""Context information for the current invocation of ansible-test."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import types as t
+
+from .util import (
+ ApplicationError,
+ import_plugins,
+ is_subdir,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_ROOT,
+ ANSIBLE_SOURCE_ROOT,
+ display,
+)
+
+from .provider import (
+ find_path_provider,
+ get_path_provider_classes,
+ ProviderNotFoundForPath,
+)
+
+from .provider.source import (
+ SourceProvider,
+)
+
+from .provider.source.unversioned import (
+ UnversionedSource,
+)
+
+from .provider.source.installed import (
+ InstalledSource,
+)
+
+from .provider.layout import (
+ ContentLayout,
+ LayoutProvider,
+)
+
+
+class DataContext:
+ """Data context providing details about the current execution environment for ansible-test."""
+ def __init__(self):
+ content_path = os.environ.get('ANSIBLE_TEST_CONTENT_ROOT')
+ current_path = os.getcwd()
+
+ layout_providers = get_path_provider_classes(LayoutProvider)
+ source_providers = get_path_provider_classes(SourceProvider)
+
+ self.__layout_providers = layout_providers
+ self.__source_providers = source_providers
+ self.__ansible_source = None # type: t.Optional[t.Tuple[t.Tuple[str, str], ...]]
+
+        self.payload_callbacks = []  # type: t.List[t.Callable[[t.List[t.Tuple[str, str]]], None]]
+
+ if content_path:
+ content = self.__create_content_layout(layout_providers, source_providers, content_path, False)
+ elif ANSIBLE_SOURCE_ROOT and is_subdir(current_path, ANSIBLE_SOURCE_ROOT):
+ content = self.__create_content_layout(layout_providers, source_providers, ANSIBLE_SOURCE_ROOT, False)
+ else:
+ content = self.__create_content_layout(layout_providers, source_providers, current_path, True)
+
+ self.content = content # type: ContentLayout
+
+ def create_collection_layouts(self): # type: () -> t.List[ContentLayout]
+ """
+ Return a list of collection layouts, one for each collection in the same collection root as the current collection layout.
+ An empty list is returned if the current content layout is not a collection layout.
+ """
+ layout = self.content
+ collection = layout.collection
+
+ if not collection:
+ return []
+
+ root_path = os.path.join(collection.root, 'ansible_collections')
+ display.info('Scanning collection root: %s' % root_path, verbosity=1)
+ namespace_names = sorted(name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name)))
+ collections = []
+
+ for namespace_name in namespace_names:
+ namespace_path = os.path.join(root_path, namespace_name)
+ collection_names = sorted(name for name in os.listdir(namespace_path) if os.path.isdir(os.path.join(namespace_path, name)))
+
+ for collection_name in collection_names:
+ collection_path = os.path.join(namespace_path, collection_name)
+
+ if collection_path == os.path.join(collection.root, collection.directory):
+ collection_layout = layout
+ else:
+ collection_layout = self.__create_content_layout(self.__layout_providers, self.__source_providers, collection_path, False)
+
+ file_count = len(collection_layout.all_files())
+
+ if not file_count:
+ continue
+
+ display.info('Including collection: %s (%d files)' % (collection_layout.collection.full_name, file_count), verbosity=1)
+ collections.append(collection_layout)
+
+ return collections
+
+ @staticmethod
+ def __create_content_layout(layout_providers, # type: t.List[t.Type[LayoutProvider]]
+ source_providers, # type: t.List[t.Type[SourceProvider]]
+ root, # type: str
+ walk, # type: bool
+ ): # type: (...) -> ContentLayout
+ """Create a content layout using the given providers and root path."""
+ layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk)
+
+ try:
+ # Begin the search for the source provider at the layout provider root.
+ # This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error.
+ # Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project.
+ # It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control.
+ source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk)
+ except ProviderNotFoundForPath:
+ source_provider = UnversionedSource(layout_provider.root)
+
+ layout = layout_provider.create(layout_provider.root, source_provider.get_paths(layout_provider.root))
+
+ return layout
+
+ def __create_ansible_source(self):
+ """Return a tuple of Ansible source files with both absolute and relative paths."""
+ if not ANSIBLE_SOURCE_ROOT:
+ sources = []
+
+ source_provider = InstalledSource(ANSIBLE_LIB_ROOT)
+ sources.extend((os.path.join(source_provider.root, path), os.path.join('lib', 'ansible', path))
+ for path in source_provider.get_paths(source_provider.root))
+
+ source_provider = InstalledSource(ANSIBLE_TEST_ROOT)
+ sources.extend((os.path.join(source_provider.root, path), os.path.join('test', 'lib', 'ansible_test', path))
+ for path in source_provider.get_paths(source_provider.root))
+
+ return tuple(sources)
+
+ if self.content.is_ansible:
+ return tuple((os.path.join(self.content.root, path), path) for path in self.content.all_files())
+
+ try:
+ source_provider = find_path_provider(SourceProvider, self.__source_providers, ANSIBLE_SOURCE_ROOT, False)
+ except ProviderNotFoundForPath:
+ source_provider = UnversionedSource(ANSIBLE_SOURCE_ROOT)
+
+ return tuple((os.path.join(source_provider.root, path), path) for path in source_provider.get_paths(source_provider.root))
+
+ @property
+ def ansible_source(self): # type: () -> t.Tuple[t.Tuple[str, str], ...]
+ """Return a tuple of Ansible source files with both absolute and relative paths."""
+ if not self.__ansible_source:
+ self.__ansible_source = self.__create_ansible_source()
+
+ return self.__ansible_source
+
+    def register_payload_callback(self, callback):  # type: (t.Callable[[t.List[t.Tuple[str, str]]], None]) -> None
+ """Register the given payload callback."""
+ self.payload_callbacks.append(callback)
+
+
+def data_init(): # type: () -> DataContext
+ """Initialize provider plugins."""
+ provider_types = (
+ 'layout',
+ 'source',
+ )
+
+ for provider_type in provider_types:
+ import_plugins('provider/%s' % provider_type)
+
+ try:
+ context = DataContext()
+ except ProviderNotFoundForPath:
+ options = [
+ ' - an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/',
+ ]
+
+ if ANSIBLE_SOURCE_ROOT:
+ options.insert(0, ' - the Ansible source: %s/' % ANSIBLE_SOURCE_ROOT)
+
+ raise ApplicationError('''The current working directory must be at or below:
+
+%s
+
+Current working directory: %s''' % ('\n'.join(options), os.getcwd()))
+
+ return context
+
+
+def data_context(): # type: () -> DataContext
+ """Return the current data context."""
+ try:
+ return data_context.instance
+ except AttributeError:
+ data_context.instance = data_init()
+ return data_context.instance
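
data_context() above memoizes its singleton on a function attribute instead of a
module-level global, so the DataContext is only constructed on first use. A
standalone sketch of the same idiom (Context is a stand-in for DataContext):

    class Context:
        def __init__(self):
            print('initialized once')

    def get_context():
        """Return the process-wide Context, creating it on first use."""
        try:
            return get_context.instance
        except AttributeError:
            get_context.instance = Context()
            return get_context.instance

    first = get_context()   # prints 'initialized once'
    second = get_context()  # returns the cached instance
    assert first is second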
diff --git a/test/lib/ansible_test/_internal/delegation.py b/test/lib/ansible_test/_internal/delegation.py
new file mode 100644
index 00000000..3262dd51
--- /dev/null
+++ b/test/lib/ansible_test/_internal/delegation.py
@@ -0,0 +1,667 @@
+"""Delegate test execution to another environment."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import sys
+import tempfile
+
+from . import types as t
+
+from .io import (
+ make_dirs,
+)
+
+from .executor import (
+ SUPPORTED_PYTHON_VERSIONS,
+ HTTPTESTER_HOSTS,
+ create_shell_command,
+ run_httptester,
+ start_httptester,
+ get_python_interpreter,
+ get_python_version,
+)
+
+from .config import (
+ TestConfig,
+ EnvironmentConfig,
+ IntegrationConfig,
+ WindowsIntegrationConfig,
+ NetworkIntegrationConfig,
+ ShellConfig,
+ SanityConfig,
+ UnitsConfig,
+)
+
+from .core_ci import (
+ AnsibleCoreCI,
+)
+
+from .manage_ci import (
+ ManagePosixCI,
+ ManageWindowsCI,
+)
+
+from .util import (
+ ApplicationError,
+ common_environment,
+ display,
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_ROOT,
+ tempdir,
+)
+
+from .util_common import (
+ run_command,
+ ResultType,
+ create_interpreter_wrapper,
+ get_docker_completion,
+ get_remote_completion,
+)
+
+from .docker_util import (
+ docker_exec,
+ docker_get,
+ docker_pull,
+ docker_put,
+ docker_rm,
+ docker_run,
+ docker_available,
+ docker_network_disconnect,
+ get_docker_networks,
+ get_docker_preferred_network_name,
+ get_docker_hostname,
+ is_docker_user_defined_network,
+)
+
+from .cloud import (
+ get_cloud_providers,
+)
+
+from .target import (
+ IntegrationTarget,
+)
+
+from .data import (
+ data_context,
+)
+
+from .payload import (
+ create_payload,
+)
+
+from .venv import (
+ create_virtual_environment,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+
+def check_delegation_args(args):
+ """
+ :type args: CommonConfig
+ """
+ if not isinstance(args, EnvironmentConfig):
+ return
+
+ if args.docker:
+ get_python_version(args, get_docker_completion(), args.docker_raw)
+ elif args.remote:
+ get_python_version(args, get_remote_completion(), args.remote)
+
+
+def delegate(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ :rtype: bool
+ """
+ if isinstance(args, TestConfig):
+ args.metadata.ci_provider = get_ci_provider().code
+
+ make_dirs(ResultType.TMP.path)
+
+ with tempfile.NamedTemporaryFile(prefix='metadata-', suffix='.json', dir=ResultType.TMP.path) as metadata_fd:
+ args.metadata_path = os.path.join(ResultType.TMP.relative_path, os.path.basename(metadata_fd.name))
+ args.metadata.to_file(args.metadata_path)
+
+ try:
+ return delegate_command(args, exclude, require, integration_targets)
+ finally:
+ args.metadata_path = None
+ else:
+ return delegate_command(args, exclude, require, integration_targets)
+
+
+def delegate_command(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ :rtype: bool
+ """
+ if args.venv:
+ delegate_venv(args, exclude, require, integration_targets)
+ return True
+
+ if args.docker:
+ delegate_docker(args, exclude, require, integration_targets)
+ return True
+
+ if args.remote:
+ delegate_remote(args, exclude, require, integration_targets)
+ return True
+
+ return False
+
+
+def delegate_venv(args, # type: EnvironmentConfig
+ exclude, # type: t.List[str]
+ require, # type: t.List[str]
+ integration_targets, # type: t.Tuple[IntegrationTarget, ...]
+ ): # type: (...) -> None
+ """Delegate ansible-test execution to a virtual environment using venv or virtualenv."""
+ if args.python:
+ versions = (args.python_version,)
+ else:
+ versions = SUPPORTED_PYTHON_VERSIONS
+
+ if args.httptester:
+ needs_httptester = sorted(target.name for target in integration_targets if 'needs/httptester/' in target.aliases)
+
+ if needs_httptester:
+ display.warning('Use --docker or --remote to enable httptester for tests marked "needs/httptester": %s' % ', '.join(needs_httptester))
+
+ if args.venv_system_site_packages:
+ suffix = '-ssp'
+ else:
+ suffix = ''
+
+ venvs = dict((version, os.path.join(ResultType.TMP.path, 'delegation', 'python%s%s' % (version, suffix))) for version in versions)
+ venvs = dict((version, path) for version, path in venvs.items() if create_virtual_environment(args, version, path, args.venv_system_site_packages))
+
+ if not venvs:
+ raise ApplicationError('No usable virtual environment support found.')
+
+ options = {
+ '--venv': 0,
+ '--venv-system-site-packages': 0,
+ }
+
+ with tempdir() as inject_path:
+ for version, path in venvs.items():
+ create_interpreter_wrapper(os.path.join(path, 'bin', 'python'), os.path.join(inject_path, 'python%s' % version))
+
+ python_interpreter = os.path.join(inject_path, 'python%s' % args.python_version)
+
+ cmd = generate_command(args, python_interpreter, ANSIBLE_BIN_PATH, data_context().content.root, options, exclude, require)
+
+ if isinstance(args, TestConfig):
+ if args.coverage and not args.coverage_label:
+ cmd += ['--coverage-label', 'venv']
+
+ env = common_environment()
+
+ with tempdir() as library_path:
+ # expose ansible and ansible_test to the virtual environment (only required when running from an install)
+ os.symlink(ANSIBLE_LIB_ROOT, os.path.join(library_path, 'ansible'))
+ os.symlink(ANSIBLE_TEST_ROOT, os.path.join(library_path, 'ansible_test'))
+
+ env.update(
+ PATH=inject_path + os.path.pathsep + env['PATH'],
+ PYTHONPATH=library_path,
+ )
+
+ run_command(args, cmd, env=env)
+
+
+def delegate_docker(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ """
+ test_image = args.docker
+ privileged = args.docker_privileged
+
+ if isinstance(args, ShellConfig):
+ use_httptester = args.httptester
+ else:
+ use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
+
+ if use_httptester:
+ docker_pull(args, args.httptester)
+
+ docker_pull(args, test_image)
+
+ httptester_id = None
+ test_id = None
+ success = False
+
+ options = {
+ '--docker': 1,
+ '--docker-privileged': 0,
+ '--docker-util': 1,
+ }
+
+ python_interpreter = get_python_interpreter(args, get_docker_completion(), args.docker_raw)
+
+ pwd = '/root'
+ ansible_root = os.path.join(pwd, 'ansible')
+
+ if data_context().content.collection:
+ content_root = os.path.join(pwd, data_context().content.collection.directory)
+ else:
+ content_root = ansible_root
+
+ remote_results_root = os.path.join(content_root, data_context().content.results_path)
+
+ cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require)
+
+ if isinstance(args, TestConfig):
+ if args.coverage and not args.coverage_label:
+ image_label = args.docker_raw
+ image_label = re.sub('[^a-zA-Z0-9]+', '-', image_label)
+ cmd += ['--coverage-label', 'docker-%s' % image_label]
+
+ if isinstance(args, IntegrationConfig):
+ if not args.allow_destructive:
+ cmd.append('--allow-destructive')
+
+ cmd_options = []
+
+ if isinstance(args, ShellConfig) or (isinstance(args, IntegrationConfig) and args.debug_strategy):
+ cmd_options.append('-it')
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd:
+ try:
+ create_payload(args, local_source_fd.name)
+
+ if use_httptester:
+ httptester_id = run_httptester(args)
+ else:
+ httptester_id = None
+
+ test_options = [
+ '--detach',
+ '--volume', '/sys/fs/cgroup:/sys/fs/cgroup:ro',
+ '--privileged=%s' % str(privileged).lower(),
+ ]
+
+ if args.docker_memory:
+ test_options.extend([
+ '--memory=%d' % args.docker_memory,
+ '--memory-swap=%d' % args.docker_memory,
+ ])
+
+ docker_socket = '/var/run/docker.sock'
+
+ if args.docker_seccomp != 'default':
+ test_options += ['--security-opt', 'seccomp=%s' % args.docker_seccomp]
+
+ if get_docker_hostname() != 'localhost' or os.path.exists(docker_socket):
+ test_options += ['--volume', '%s:%s' % (docker_socket, docker_socket)]
+
+ if httptester_id:
+ test_options += ['--env', 'HTTPTESTER=1']
+
+ network = get_docker_preferred_network_name(args)
+
+ if not is_docker_user_defined_network(network):
+ # legacy links are required when using the default bridge network instead of user-defined networks
+ for host in HTTPTESTER_HOSTS:
+ test_options += ['--link', '%s:%s' % (httptester_id, host)]
+
+ if isinstance(args, IntegrationConfig):
+ cloud_platforms = get_cloud_providers(args)
+
+ for cloud_platform in cloud_platforms:
+ test_options += cloud_platform.get_docker_run_options()
+
+ test_id = docker_run(args, test_image, options=test_options)[0]
+
+ if args.explain:
+ test_id = 'test_id'
+ else:
+ test_id = test_id.strip()
+
+ # write temporary files to /root since /tmp isn't ready immediately on container start
+ docker_put(args, test_id, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'docker.sh'), '/root/docker.sh')
+ docker_exec(args, test_id, ['/bin/bash', '/root/docker.sh'])
+ docker_put(args, test_id, local_source_fd.name, '/root/test.tgz')
+ docker_exec(args, test_id, ['tar', 'oxzf', '/root/test.tgz', '-C', '/root'])
+
+ # docker images are only expected to have a single python version available
+ if isinstance(args, UnitsConfig) and not args.python:
+ cmd += ['--python', 'default']
+
+ # run unit tests unprivileged to prevent stray writes to the source tree
+ # also disconnect from the network once requirements have been installed
+ if isinstance(args, UnitsConfig):
+ writable_dirs = [
+ os.path.join(content_root, ResultType.JUNIT.relative_path),
+ os.path.join(content_root, ResultType.COVERAGE.relative_path),
+ ]
+
+ docker_exec(args, test_id, ['mkdir', '-p'] + writable_dirs)
+ docker_exec(args, test_id, ['chmod', '777'] + writable_dirs)
+ docker_exec(args, test_id, ['chmod', '755', '/root'])
+ docker_exec(args, test_id, ['chmod', '644', os.path.join(content_root, args.metadata_path)])
+
+ docker_exec(args, test_id, ['useradd', 'pytest', '--create-home'])
+
+ docker_exec(args, test_id, cmd + ['--requirements-mode', 'only'], options=cmd_options)
+
+ networks = get_docker_networks(args, test_id)
+
+ if networks is not None:
+ for network in networks:
+ docker_network_disconnect(args, test_id, network)
+ else:
+ display.warning('Network disconnection is not supported (this is normal under podman). '
+ 'Tests will not be isolated from the network. Network-related tests may misbehave.')
+
+ cmd += ['--requirements-mode', 'skip']
+
+ cmd_options += ['--user', 'pytest']
+
+ try:
+ docker_exec(args, test_id, cmd, options=cmd_options)
+                # docker_exec raises SubprocessError on failure, so reaching this point
+                # means the earlier preparation and the docker_exec call above both succeeded.
+ success = True
+ finally:
+ local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
+
+ remote_test_root = os.path.dirname(remote_results_root)
+ remote_results_name = os.path.basename(remote_results_root)
+ remote_temp_file = os.path.join('/root', remote_results_name + '.tgz')
+
+ make_dirs(local_test_root) # make sure directory exists for collections which have no tests
+
+ with tempfile.NamedTemporaryFile(prefix='ansible-result-', suffix='.tgz') as local_result_fd:
+ docker_exec(args, test_id, ['tar', 'czf', remote_temp_file, '--exclude', ResultType.TMP.name, '-C', remote_test_root, remote_results_name])
+ docker_get(args, test_id, remote_temp_file, local_result_fd.name)
+ run_command(args, ['tar', 'oxzf', local_result_fd.name, '-C', local_test_root])
+ finally:
+ if httptester_id:
+ docker_rm(args, httptester_id)
+
+ if test_id:
+ if args.docker_terminate == 'always' or (args.docker_terminate == 'success' and success):
+ docker_rm(args, test_id)
+
+
+def delegate_remote(args, exclude, require, integration_targets):
+ """
+ :type args: EnvironmentConfig
+ :type exclude: list[str]
+ :type require: list[str]
+ :type integration_targets: tuple[IntegrationTarget]
+ """
+ remote = args.parsed_remote
+
+ core_ci = AnsibleCoreCI(args, remote.platform, remote.version, stage=args.remote_stage, provider=args.remote_provider, arch=remote.arch)
+ success = False
+ raw = False
+
+ if isinstance(args, ShellConfig):
+ use_httptester = args.httptester
+ raw = args.raw
+ else:
+ use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in integration_targets)
+
+ if use_httptester and not docker_available():
+ display.warning('Assuming --disable-httptester since `docker` is not available.')
+ use_httptester = False
+
+ httptester_id = None
+ ssh_options = []
+ content_root = None
+
+ try:
+ core_ci.start()
+
+ if use_httptester:
+ httptester_id, ssh_options = start_httptester(args)
+
+ core_ci.wait()
+
+ python_version = get_python_version(args, get_remote_completion(), args.remote)
+
+ if remote.platform == 'windows':
+ # Windows doesn't need the ansible-test fluff, just run the SSH command
+ manage = ManageWindowsCI(core_ci)
+ manage.setup(python_version)
+
+ cmd = ['powershell.exe']
+ elif raw:
+ manage = ManagePosixCI(core_ci)
+ manage.setup(python_version)
+
+ cmd = create_shell_command(['bash'])
+ else:
+ manage = ManagePosixCI(core_ci)
+ pwd = manage.setup(python_version)
+
+ options = {
+ '--remote': 1,
+ }
+
+ python_interpreter = get_python_interpreter(args, get_remote_completion(), args.remote)
+
+ ansible_root = os.path.join(pwd, 'ansible')
+
+ if data_context().content.collection:
+ content_root = os.path.join(pwd, data_context().content.collection.directory)
+ else:
+ content_root = ansible_root
+
+ cmd = generate_command(args, python_interpreter, os.path.join(ansible_root, 'bin'), content_root, options, exclude, require)
+
+ if httptester_id:
+ cmd += ['--inject-httptester']
+
+ if isinstance(args, TestConfig):
+ if args.coverage and not args.coverage_label:
+ cmd += ['--coverage-label', 'remote-%s-%s' % (remote.platform, remote.version)]
+
+ if isinstance(args, IntegrationConfig):
+ if not args.allow_destructive:
+ cmd.append('--allow-destructive')
+
+ # remote instances are only expected to have a single python version available
+ if isinstance(args, UnitsConfig) and not args.python:
+ cmd += ['--python', 'default']
+
+ if isinstance(args, IntegrationConfig):
+ cloud_platforms = get_cloud_providers(args)
+
+ for cloud_platform in cloud_platforms:
+ ssh_options += cloud_platform.get_remote_ssh_options()
+
+ try:
+ manage.ssh(cmd, ssh_options)
+ success = True
+ finally:
+ download = False
+
+ if remote.platform != 'windows':
+ download = True
+
+ if isinstance(args, ShellConfig):
+ if args.raw:
+ download = False
+
+ if download and content_root:
+ local_test_root = os.path.dirname(os.path.join(data_context().content.root, data_context().content.results_path))
+
+ remote_results_root = os.path.join(content_root, data_context().content.results_path)
+ remote_results_name = os.path.basename(remote_results_root)
+ remote_temp_path = os.path.join('/tmp', remote_results_name)
+
+                # AIX cp and GNU cp accept different options; no common invocation was found
+                # that achieves the same result on both, so choose the options per platform.
+ cp_opts = '-hr' if remote.platform in ['aix', 'ibmi'] else '-a'
+
+ manage.ssh('rm -rf {0} && mkdir {0} && cp {1} {2}/* {0}/ && chmod -R a+r {0}'.format(remote_temp_path, cp_opts, remote_results_root))
+ manage.download(remote_temp_path, local_test_root)
+ finally:
+ if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
+ core_ci.stop()
+
+ if httptester_id:
+ docker_rm(args, httptester_id)
+
+
+def generate_command(args, python_interpreter, ansible_bin_path, content_root, options, exclude, require):
+ """
+ :type args: EnvironmentConfig
+ :type python_interpreter: str | None
+ :type ansible_bin_path: str
+ :type content_root: str
+ :type options: dict[str, int]
+ :type exclude: list[str]
+ :type require: list[str]
+ :rtype: list[str]
+ """
+ options['--color'] = 1
+
+ cmd = [os.path.join(ansible_bin_path, 'ansible-test')]
+
+ if python_interpreter:
+ cmd = [python_interpreter] + cmd
+
+ # Force the encoding used during delegation.
+ # This is only needed because ansible-test relies on Python's file system encoding.
+ # Environments that do not have the locale configured are thus unable to work with unicode file paths.
+ # Examples include FreeBSD and some Linux containers.
+ env_vars = dict(
+ LC_ALL='en_US.UTF-8',
+ ANSIBLE_TEST_CONTENT_ROOT=content_root,
+ )
+
+ env_args = ['%s=%s' % (key, env_vars[key]) for key in sorted(env_vars)]
+
+ cmd = ['/usr/bin/env'] + env_args + cmd
+
+ cmd += list(filter_options(args, sys.argv[1:], options, exclude, require))
+ cmd += ['--color', 'yes' if args.color else 'no']
+
+ if args.requirements:
+ cmd += ['--requirements']
+
+ if isinstance(args, ShellConfig):
+ cmd = create_shell_command(cmd)
+ elif isinstance(args, SanityConfig):
+ base_branch = args.base_branch or get_ci_provider().get_base_branch()
+
+ if base_branch:
+ cmd += ['--base-branch', base_branch]
+
+ return cmd
+
+
+def filter_options(args, argv, options, exclude, require):
+ """
+ :type args: EnvironmentConfig
+ :type argv: list[str]
+ :type options: dict[str, int]
+ :type exclude: list[str]
+ :type require: list[str]
+ :rtype: collections.Iterable[str]
+ """
+ options = options.copy()
+
+ options['--requirements'] = 0
+ options['--truncate'] = 1
+ options['--redact'] = 0
+ options['--no-redact'] = 0
+
+ if isinstance(args, TestConfig):
+ options.update({
+ '--changed': 0,
+ '--tracked': 0,
+ '--untracked': 0,
+ '--ignore-committed': 0,
+ '--ignore-staged': 0,
+ '--ignore-unstaged': 0,
+ '--changed-from': 1,
+ '--changed-path': 1,
+ '--metadata': 1,
+ '--exclude': 1,
+ '--require': 1,
+ })
+ elif isinstance(args, SanityConfig):
+ options.update({
+ '--base-branch': 1,
+ })
+
+ if isinstance(args, IntegrationConfig):
+ options.update({
+ '--no-temp-unicode': 0,
+ '--no-pip-check': 0,
+ })
+
+ if isinstance(args, (NetworkIntegrationConfig, WindowsIntegrationConfig)):
+ options.update({
+ '--inventory': 1,
+ })
+
+ remaining = 0
+
+ for arg in argv:
+ if not arg.startswith('-') and remaining:
+ remaining -= 1
+ continue
+
+ remaining = 0
+
+ parts = arg.split('=', 1)
+ key = parts[0]
+
+ if key in options:
+ remaining = options[key] - len(parts) + 1
+ continue
+
+ yield arg
+
+ for arg in args.delegate_args:
+ yield arg
+
+ for target in exclude:
+ yield '--exclude'
+ yield target
+
+ for target in require:
+ yield '--require'
+ yield target
+
+ if isinstance(args, TestConfig):
+ if args.metadata_path:
+ yield '--metadata'
+ yield args.metadata_path
+
+ yield '--truncate'
+ yield '%d' % args.truncate
+
+ if args.redact:
+ yield '--redact'
+ else:
+ yield '--no-redact'
+
+ if isinstance(args, IntegrationConfig):
+ if args.no_temp_unicode:
+ yield '--no-temp-unicode'
+
+ if not args.pip_check:
+ yield '--no-pip-check'
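
The core of filter_options() above is the flag-consumption loop: the options dict
maps each flag to the number of values it takes, matching flags and their values
are dropped, and everything else passes through to the delegated command. A
standalone sketch with a hypothetical argv:

    def strip_options(argv, options):
        remaining = 0

        for arg in argv:
            if not arg.startswith('-') and remaining:
                remaining -= 1
                continue

            remaining = 0
            parts = arg.split('=', 1)

            if parts[0] in options:
                remaining = options[parts[0]] - len(parts) + 1
                continue

            yield arg

    argv = ['units', '--docker', 'centos8', '--color', '--truncate=0', '-v']
    print(list(strip_options(argv, {'--docker': 1, '--color': 0, '--truncate': 1})))
    # ['units', '-v']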
diff --git a/test/lib/ansible_test/_internal/diff.py b/test/lib/ansible_test/_internal/diff.py
new file mode 100644
index 00000000..1e2038b9
--- /dev/null
+++ b/test/lib/ansible_test/_internal/diff.py
@@ -0,0 +1,256 @@
+"""Diff parsing functions and classes."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import textwrap
+import traceback
+
+from . import types as t
+
+from .util import (
+ ApplicationError,
+)
+
+
+def parse_diff(lines):
+ """
+ :type lines: list[str]
+ :rtype: list[FileDiff]
+ """
+ return DiffParser(lines).files
+
+
+class FileDiff:
+ """Parsed diff for a single file."""
+ def __init__(self, old_path, new_path):
+ """
+ :type old_path: str
+ :type new_path: str
+ """
+ self.old = DiffSide(old_path, new=False)
+ self.new = DiffSide(new_path, new=True)
+ self.headers = [] # type: t.List[str]
+ self.binary = False
+
+ def append_header(self, line):
+ """
+ :type line: str
+ """
+ self.headers.append(line)
+
+ @property
+ def is_complete(self):
+ """
+ :rtype: bool
+ """
+ return self.old.is_complete and self.new.is_complete
+
+
+class DiffSide:
+ """Parsed diff for a single 'side' of a single file."""
+ def __init__(self, path, new):
+ """
+ :type path: str
+ :type new: bool
+ """
+ self.path = path
+ self.new = new
+ self.prefix = '+' if self.new else '-'
+ self.eof_newline = True
+ self.exists = True
+
+ self.lines = [] # type: t.List[t.Tuple[int, str]]
+ self.lines_and_context = [] # type: t.List[t.Tuple[int, str]]
+ self.ranges = [] # type: t.List[t.Tuple[int, int]]
+
+ self._next_line_number = 0
+ self._lines_remaining = 0
+ self._range_start = 0
+
+ def set_start(self, line_start, line_count):
+ """
+ :type line_start: int
+ :type line_count: int
+ """
+ self._next_line_number = line_start
+ self._lines_remaining = line_count
+ self._range_start = 0
+
+ def append(self, line):
+ """
+ :type line: str
+ """
+ if self._lines_remaining <= 0:
+ raise Exception('Diff range overflow.')
+
+ entry = self._next_line_number, line
+
+ if line.startswith(' '):
+ pass
+ elif line.startswith(self.prefix):
+ self.lines.append(entry)
+
+ if not self._range_start:
+ self._range_start = self._next_line_number
+ else:
+ raise Exception('Unexpected diff content prefix.')
+
+ self.lines_and_context.append(entry)
+
+ self._lines_remaining -= 1
+
+ if self._range_start:
+ if self.is_complete:
+ range_end = self._next_line_number
+ elif line.startswith(' '):
+ range_end = self._next_line_number - 1
+ else:
+ range_end = 0
+
+ if range_end:
+ self.ranges.append((self._range_start, range_end))
+ self._range_start = 0
+
+ self._next_line_number += 1
+
+ @property
+ def is_complete(self):
+ """
+ :rtype: bool
+ """
+ return self._lines_remaining == 0
+
+ def format_lines(self, context=True):
+ """
+ :type context: bool
+ :rtype: list[str]
+ """
+ if context:
+ lines = self.lines_and_context
+ else:
+ lines = self.lines
+
+ return ['%s:%4d %s' % (self.path, line[0], line[1]) for line in lines]
+
+
+class DiffParser:
+ """Parse diff lines."""
+ def __init__(self, lines):
+ """
+ :type lines: list[str]
+ """
+ self.lines = lines
+ self.files = [] # type: t.List[FileDiff]
+
+ self.action = self.process_start
+ self.line_number = 0
+ self.previous_line = None # type: t.Optional[str]
+ self.line = None # type: t.Optional[str]
+ self.file = None # type: t.Optional[FileDiff]
+
+ for self.line in self.lines:
+ self.line_number += 1
+
+ try:
+ self.action()
+ except Exception as ex:
+ message = textwrap.dedent('''
+ %s
+
+ Line: %d
+ Previous: %s
+ Current: %s
+ %s
+ ''').strip() % (
+ ex,
+ self.line_number,
+ self.previous_line or '',
+ self.line or '',
+ traceback.format_exc(),
+ )
+
+ raise ApplicationError(message.strip())
+
+ self.previous_line = self.line
+
+ self.complete_file()
+
+ def process_start(self):
+ """Process a diff start line."""
+ self.complete_file()
+
+ match = re.search(r'^diff --git "?a/(?P<old_path>.*)"? "?b/(?P<new_path>.*)"?$', self.line)
+
+ if not match:
+ raise Exception('Unexpected diff start line.')
+
+ self.file = FileDiff(match.group('old_path'), match.group('new_path'))
+ self.action = self.process_continue
+
+ def process_range(self):
+ """Process a diff range line."""
+ match = re.search(r'^@@ -((?P<old_start>[0-9]+),)?(?P<old_count>[0-9]+) \+((?P<new_start>[0-9]+),)?(?P<new_count>[0-9]+) @@', self.line)
+
+ if not match:
+ raise Exception('Unexpected diff range line.')
+
+ self.file.old.set_start(int(match.group('old_start') or 1), int(match.group('old_count')))
+ self.file.new.set_start(int(match.group('new_start') or 1), int(match.group('new_count')))
+ self.action = self.process_content
+
+ def process_continue(self):
+ """Process a diff start, range or header line."""
+ if self.line.startswith('diff '):
+ self.process_start()
+ elif self.line.startswith('@@ '):
+ self.process_range()
+ else:
+ self.process_header()
+
+ def process_header(self):
+ """Process a diff header line."""
+ if self.line.startswith('Binary files '):
+ self.file.binary = True
+ elif self.line == '--- /dev/null':
+ self.file.old.exists = False
+ elif self.line == '+++ /dev/null':
+ self.file.new.exists = False
+ else:
+ self.file.append_header(self.line)
+
+ def process_content(self):
+ """Process a diff content line."""
+ if self.line == r'\ No newline at end of file':
+ if self.previous_line.startswith(' '):
+ self.file.old.eof_newline = False
+ self.file.new.eof_newline = False
+ elif self.previous_line.startswith('-'):
+ self.file.old.eof_newline = False
+ elif self.previous_line.startswith('+'):
+ self.file.new.eof_newline = False
+ else:
+ raise Exception('Unexpected previous diff content line.')
+
+ return
+
+ if self.file.is_complete:
+ self.process_continue()
+ return
+
+ if self.line.startswith(' '):
+ self.file.old.append(self.line)
+ self.file.new.append(self.line)
+ elif self.line.startswith('-'):
+ self.file.old.append(self.line)
+ elif self.line.startswith('+'):
+ self.file.new.append(self.line)
+ else:
+ raise Exception('Unexpected diff content line.')
+
+ def complete_file(self):
+ """Complete processing of the current file, if any."""
+ if not self.file:
+ return
+
+ self.files.append(self.file)
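
The hunk-header pattern from DiffParser.process_range() above, applied to a sample
range line; note that when the start number is omitted the parser defaults it to 1:

    import re

    pattern = r'^@@ -((?P<old_start>[0-9]+),)?(?P<old_count>[0-9]+) \+((?P<new_start>[0-9]+),)?(?P<new_count>[0-9]+) @@'
    match = re.search(pattern, '@@ -10,7 +10,8 @@ def example():')

    print(int(match.group('old_start') or 1), int(match.group('old_count')))  # 10 7
    print(int(match.group('new_start') or 1), int(match.group('new_count')))  # 10 8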
diff --git a/test/lib/ansible_test/_internal/docker_util.py b/test/lib/ansible_test/_internal/docker_util.py
new file mode 100644
index 00000000..1b47364d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/docker_util.py
@@ -0,0 +1,409 @@
+"""Functions for accessing docker via the docker cli."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import time
+
+from .io import (
+ open_binary_file,
+ read_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ common_environment,
+ display,
+ find_executable,
+ SubprocessError,
+)
+
+from .http import (
+ urlparse,
+)
+
+from .util_common import (
+ run_command,
+)
+
+from .config import (
+ EnvironmentConfig,
+)
+
+BUFFER_SIZE = 256 * 256
+
+
+def docker_available():
+ """
+ :rtype: bool
+ """
+    return bool(find_executable('docker', required=False))
+
+
+def get_docker_hostname(): # type: () -> str
+ """Return the hostname of the Docker service."""
+ try:
+ return get_docker_hostname.hostname
+ except AttributeError:
+ pass
+
+ docker_host = os.environ.get('DOCKER_HOST')
+
+ if docker_host and docker_host.startswith('tcp://'):
+ try:
+ hostname = urlparse(docker_host)[1].split(':')[0]
+ display.info('Detected Docker host: %s' % hostname, verbosity=1)
+ except ValueError:
+ hostname = 'localhost'
+ display.warning('Could not parse DOCKER_HOST environment variable "%s", falling back to localhost.' % docker_host)
+ else:
+ hostname = 'localhost'
+ display.info('Assuming Docker is available on localhost.', verbosity=1)
+
+ get_docker_hostname.hostname = hostname
+
+ return hostname
+
+
+def get_docker_container_id():
+ """
+ :rtype: str | None
+ """
+ try:
+ return get_docker_container_id.container_id
+ except AttributeError:
+ pass
+
+ path = '/proc/self/cpuset'
+ container_id = None
+
+ if os.path.exists(path):
+ # File content varies based on the environment:
+ # No Container: /
+ # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
+ # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
+ # Podman: /../../../../../..
+ contents = read_text_file(path)
+
+ cgroup_path, cgroup_name = os.path.split(contents.strip())
+
+ if cgroup_path in ('/docker', '/azpl_job'):
+ container_id = cgroup_name
+
+ get_docker_container_id.container_id = container_id
+
+ if container_id:
+ display.info('Detected execution in Docker container: %s' % container_id, verbosity=1)
+
+ return container_id
+
+
+def get_docker_container_ip(args, container_id):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :rtype: str
+ """
+ results = docker_inspect(args, container_id)
+ network_settings = results[0]['NetworkSettings']
+ networks = network_settings.get('Networks')
+
+ if networks:
+ network_name = get_docker_preferred_network_name(args)
+ ipaddress = networks[network_name]['IPAddress']
+ else:
+ # podman doesn't provide Networks, fall back to using IPAddress
+ ipaddress = network_settings['IPAddress']
+
+ if not ipaddress:
+ raise ApplicationError('Cannot retrieve IP address for container: %s' % container_id)
+
+ return ipaddress
+
+
+def get_docker_network_name(args, container_id): # type: (EnvironmentConfig, str) -> str
+ """
+ Return the network name of the specified container.
+ Raises an exception if zero or more than one network is found.
+ """
+ networks = get_docker_networks(args, container_id)
+
+ if not networks:
+ raise ApplicationError('No network found for Docker container: %s.' % container_id)
+
+ if len(networks) > 1:
+ raise ApplicationError('Found multiple networks for Docker container %s instead of only one: %s' % (container_id, ', '.join(networks)))
+
+ return networks[0]
+
+
+def get_docker_preferred_network_name(args): # type: (EnvironmentConfig) -> str
+ """
+ Return the preferred network name for use with Docker. The selection logic is:
+ - the network selected by the user with `--docker-network`
+ - the network of the currently running docker container (if any)
+ - the default docker network (returns None)
+ """
+ network = None
+
+ if args.docker_network:
+ network = args.docker_network
+ else:
+ current_container_id = get_docker_container_id()
+
+ if current_container_id:
+ # Make sure any additional containers we launch use the same network as the current container we're running in.
+ # This is needed when ansible-test is running in a container that is not connected to Docker's default network.
+ network = get_docker_network_name(args, current_container_id)
+
+ return network
+
+
+def is_docker_user_defined_network(network): # type: (str) -> bool
+ """Return True if the network being used is a user-defined network."""
+ return network and network != 'bridge'
+
+
+def get_docker_networks(args, container_id):
+ """
+ :param args: EnvironmentConfig
+ :param container_id: str
+ :rtype: list[str]
+ """
+ results = docker_inspect(args, container_id)
+    # podman doesn't return Networks; just silently return None if it's missing
+ networks = results[0]['NetworkSettings'].get('Networks')
+ if networks is None:
+ return None
+ return sorted(networks)
+
+
+def docker_pull(args, image):
+ """
+ :type args: EnvironmentConfig
+ :type image: str
+ """
+ if ('@' in image or ':' in image) and docker_images(args, image):
+ display.info('Skipping docker pull of existing image with tag or digest: %s' % image, verbosity=2)
+ return
+
+ if not args.docker_pull:
+ display.warning('Skipping docker pull for "%s". Image may be out-of-date.' % image)
+ return
+
+ for _iteration in range(1, 10):
+ try:
+ docker_command(args, ['pull', image])
+ return
+ except SubprocessError:
+ display.warning('Failed to pull docker image "%s". Waiting a few seconds before trying again.' % image)
+ time.sleep(3)
+
+ raise ApplicationError('Failed to pull docker image "%s".' % image)
+
+
+def docker_put(args, container_id, src, dst):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :type src: str
+ :type dst: str
+ """
+ # avoid 'docker cp' due to a bug which causes 'docker rm' to fail
+ with open_binary_file(src) as src_fd:
+ docker_exec(args, container_id, ['dd', 'of=%s' % dst, 'bs=%s' % BUFFER_SIZE],
+ options=['-i'], stdin=src_fd, capture=True)
+
+
+def docker_get(args, container_id, src, dst):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :type src: str
+ :type dst: str
+ """
+ # avoid 'docker cp' due to a bug which causes 'docker rm' to fail
+ with open_binary_file(dst, 'wb') as dst_fd:
+ docker_exec(args, container_id, ['dd', 'if=%s' % src, 'bs=%s' % BUFFER_SIZE],
+ options=['-i'], stdout=dst_fd, capture=True)
+
+
+def docker_run(args, image, options, cmd=None):
+ """
+ :type args: EnvironmentConfig
+ :type image: str
+ :type options: list[str] | None
+ :type cmd: list[str] | None
+ :rtype: str | None, str | None
+ """
+ if not options:
+ options = []
+
+ if not cmd:
+ cmd = []
+
+ network = get_docker_preferred_network_name(args)
+
+ if is_docker_user_defined_network(network):
+        # Only pass the network option when the network is not the default bridge network.
+        # Passing it for the default bridge network results in an error when --link is used: links are only supported for user-defined networks.
+ options.extend(['--network', network])
+
+ for _iteration in range(1, 3):
+ try:
+ return docker_command(args, ['run'] + options + [image] + cmd, capture=True)
+ except SubprocessError as ex:
+ display.error(ex)
+ display.warning('Failed to run docker image "%s". Waiting a few seconds before trying again.' % image)
+ time.sleep(3)
+
+ raise ApplicationError('Failed to run docker image "%s".' % image)
+
+
+def docker_images(args, image):
+ """
+ :param args: CommonConfig
+ :param image: str
+ :rtype: list[dict[str, any]]
+ """
+ try:
+ stdout, _dummy = docker_command(args, ['images', image, '--format', '{{json .}}'], capture=True, always=True)
+ except SubprocessError as ex:
+ if 'no such image' in ex.stderr:
+ return [] # podman does not handle this gracefully, exits 125
+
+ if 'function "json" not defined' in ex.stderr:
+ # podman > 2 and < 2.2.0 breaks with --format {{json .}} and requires --format json,
+ # so we try that as a fallback. If it fails again, we just raise the exception and bail.
+ stdout, _dummy = docker_command(args, ['images', image, '--format', 'json'], capture=True, always=True)
+ else:
+ raise ex
+
+ if stdout.startswith('['):
+ # modern podman outputs a pretty-printed json list. Just load the whole thing.
+ return json.loads(stdout)
+
+ # docker outputs one json object per line (jsonl)
+ return [json.loads(line) for line in stdout.splitlines()]
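+
+
+# Illustrative only (not part of the original module): the two output shapes the
+# parsing above accepts, with hypothetical values:
+#
+#   docker, one JSON object per line (jsonl):
+#     {"Repository": "quay.io/ansible/default-test-container", "Tag": "2.9.0"}
+#   modern podman, a single pretty-printed JSON list:
+#     [{"Names": ["quay.io/ansible/default-test-container:2.9.0"]}]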
+
+
+def docker_rm(args, container_id):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ """
+ try:
+ docker_command(args, ['rm', '-f', container_id], capture=True)
+ except SubprocessError as ex:
+ if 'no such container' in ex.stderr:
+ pass # podman does not handle this gracefully, exits 1
+ else:
+ raise ex
+
+
+def docker_inspect(args, container_id):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :rtype: list[dict]
+ """
+ if args.explain:
+ return []
+
+ try:
+ stdout = docker_command(args, ['inspect', container_id], capture=True)[0]
+ return json.loads(stdout)
+ except SubprocessError as ex:
+ if 'no such image' in ex.stderr:
+ return [] # podman does not handle this gracefully, exits 125
+ try:
+ return json.loads(ex.stdout)
+ except Exception:
+ raise ex
+
+
+def docker_network_disconnect(args, container_id, network):
+ """
+ :param args: EnvironmentConfig
+ :param container_id: str
+ :param network: str
+ """
+ docker_command(args, ['network', 'disconnect', network, container_id], capture=True)
+
+
+def docker_network_inspect(args, network):
+ """
+ :type args: EnvironmentConfig
+ :type network: str
+ :rtype: list[dict]
+ """
+ if args.explain:
+ return []
+
+ try:
+ stdout = docker_command(args, ['network', 'inspect', network], capture=True)[0]
+ return json.loads(stdout)
+ except SubprocessError as ex:
+ try:
+ return json.loads(ex.stdout)
+ except Exception:
+ raise ex
+
+
+def docker_exec(args, container_id, cmd, options=None, capture=False, stdin=None, stdout=None):
+ """
+ :type args: EnvironmentConfig
+ :type container_id: str
+ :type cmd: list[str]
+ :type options: list[str] | None
+ :type capture: bool
+ :type stdin: BinaryIO | None
+ :type stdout: BinaryIO | None
+ :rtype: str | None, str | None
+ """
+ if not options:
+ options = []
+
+ return docker_command(args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout)
+
+
+def docker_info(args):
+ """
+ :param args: CommonConfig
+ :rtype: dict[str, any]
+ """
+ stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True)
+ return json.loads(stdout)
+
+
+def docker_version(args):
+ """
+ :param args: CommonConfig
+ :rtype: dict[str, any]
+ """
+ stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True)
+ return json.loads(stdout)
+
+
+def docker_command(args, cmd, capture=False, stdin=None, stdout=None, always=False):
+ """
+ :type args: CommonConfig
+ :type cmd: list[str]
+ :type capture: bool
+ :type stdin: file | None
+ :type stdout: file | None
+ :type always: bool
+ :rtype: str | None, str | None
+ """
+ env = docker_environment()
+ return run_command(args, ['docker'] + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, always=always)
+
+
+def docker_environment():
+ """
+ :rtype: dict[str, str]
+ """
+ env = common_environment()
+ env.update(dict((key, os.environ[key]) for key in os.environ if key.startswith('DOCKER_')))
+ return env
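+
+
+# Illustrative only (not part of the original module): because docker_environment()
+# passes through all DOCKER_* variables, a hypothetical remote daemon configuration
+# such as the following is honored by every docker_command() invocation:
+#
+#   DOCKER_HOST=tcp://192.0.2.10:2376 DOCKER_TLS_VERIFY=1 ansible-test units --docker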
diff --git a/test/lib/ansible_test/_internal/encoding.py b/test/lib/ansible_test/_internal/encoding.py
new file mode 100644
index 00000000..8e014794
--- /dev/null
+++ b/test/lib/ansible_test/_internal/encoding.py
@@ -0,0 +1,41 @@
+"""Functions for encoding and decoding strings."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from . import types as t
+
+ENCODING = 'utf-8'
+
+Text = type(u'')
+
+
+def to_optional_bytes(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[bytes]
+ """Return the given value as bytes encoded using UTF-8 if not already bytes, or None if the value is None."""
+ return None if value is None else to_bytes(value, errors)
+
+
+def to_optional_text(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[t.Text]
+ """Return the given value as text decoded using UTF-8 if not already text, or None if the value is None."""
+ return None if value is None else to_text(value, errors)
+
+
+def to_bytes(value, errors='strict'): # type: (t.AnyStr, str) -> bytes
+ """Return the given value as bytes encoded using UTF-8 if not already bytes."""
+ if isinstance(value, bytes):
+ return value
+
+ if isinstance(value, Text):
+ return value.encode(ENCODING, errors)
+
+ raise Exception('value is not bytes or text: %s' % type(value))
+
+
+def to_text(value, errors='strict'): # type: (t.AnyStr, str) -> t.Text
+ """Return the given value as text decoded using UTF-8 if not already text."""
+ if isinstance(value, bytes):
+ return value.decode(ENCODING, errors)
+
+ if isinstance(value, Text):
+ return value
+
+ raise Exception('value is not bytes or text: %s' % type(value))
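+
+
+# Illustrative only (not part of the original module): round-trip behavior of the
+# helpers above:
+#
+#   to_bytes(u'caf\xe9') == b'caf\xc3\xa9'   # text -> UTF-8 bytes
+#   to_text(b'caf\xc3\xa9') == u'caf\xe9'    # UTF-8 bytes -> text
+#   to_optional_bytes(None) is None          # None passes through unchanged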
diff --git a/test/lib/ansible_test/_internal/env.py b/test/lib/ansible_test/_internal/env.py
new file mode 100644
index 00000000..60c0245e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/env.py
@@ -0,0 +1,293 @@
+"""Show information about the test environment."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import functools
+import os
+import platform
+import signal
+import sys
+import time
+
+from .config import (
+ CommonConfig,
+ TestConfig,
+)
+
+from .io import (
+ write_json_file,
+ read_json_file,
+)
+
+from .util import (
+ display,
+ find_executable,
+ SubprocessError,
+ ApplicationError,
+ get_ansible_version,
+ get_available_python_versions,
+)
+
+from .util_common import (
+ data_context,
+ write_json_test_results,
+ ResultType,
+)
+
+from .docker_util import (
+ docker_info,
+ docker_version
+)
+
+from .thread import (
+ WrappedThread,
+)
+
+from .constants import (
+ TIMEOUT_PATH,
+)
+
+from .test import (
+ TestTimeout,
+)
+
+from .executor import (
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+
+class EnvConfig(CommonConfig):
+ """Configuration for the tools command."""
+ def __init__(self, args):
+ """
+ :type args: any
+ """
+ super(EnvConfig, self).__init__(args, 'env')
+
+ self.show = args.show
+ self.dump = args.dump
+ self.timeout = args.timeout
+ self.list_files = args.list_files
+
+ if not self.show and not self.dump and self.timeout is None and not self.list_files:
+ # default to --show if no options were given
+ self.show = True
+
+
+def command_env(args):
+ """
+ :type args: EnvConfig
+ """
+ show_dump_env(args)
+ list_files_env(args)
+ set_timeout(args)
+
+
+def show_dump_env(args):
+ """
+ :type args: EnvConfig
+ """
+ if not args.show and not args.dump:
+ return
+
+ data = dict(
+ ansible=dict(
+ version=get_ansible_version(),
+ ),
+ docker=get_docker_details(args),
+ environ=os.environ.copy(),
+ location=dict(
+ pwd=os.environ.get('PWD', None),
+ cwd=os.getcwd(),
+ ),
+ git=get_ci_provider().get_git_details(args),
+ platform=dict(
+ datetime=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
+ platform=platform.platform(),
+ uname=platform.uname(),
+ ),
+ python=dict(
+ executable=sys.executable,
+ version=platform.python_version(),
+ ),
+ interpreters=get_available_python_versions(SUPPORTED_PYTHON_VERSIONS),
+ )
+
+ if args.show:
+ verbose = {
+ 'docker': 3,
+ 'docker.executable': 0,
+ 'environ': 2,
+ 'platform.uname': 1,
+ }
+
+ show_dict(data, verbose)
+
+ if args.dump and not args.explain:
+ write_json_test_results(ResultType.BOT, 'data-environment.json', data)
+
+
+def list_files_env(args): # type: (EnvConfig) -> None
+ """List files on stdout."""
+ if not args.list_files:
+ return
+
+ for path in data_context().content.all_files():
+ display.info(path)
+
+
+def set_timeout(args):
+ """
+ :type args: EnvConfig
+ """
+ if args.timeout is None:
+ return
+
+ if args.timeout:
+ deadline = (datetime.datetime.utcnow() + datetime.timedelta(minutes=args.timeout)).strftime('%Y-%m-%dT%H:%M:%SZ')
+
+ display.info('Setting a %d minute test timeout which will end at: %s' % (args.timeout, deadline), verbosity=1)
+ else:
+ deadline = None
+
+ display.info('Clearing existing test timeout.', verbosity=1)
+
+ if args.explain:
+ return
+
+ if deadline:
+ data = dict(
+ duration=args.timeout,
+ deadline=deadline,
+ )
+
+ write_json_file(TIMEOUT_PATH, data)
+ elif os.path.exists(TIMEOUT_PATH):
+ os.remove(TIMEOUT_PATH)
+
+
+def get_timeout():
+ """
+ :rtype: dict[str, any] | None
+ """
+ if not os.path.exists(TIMEOUT_PATH):
+ return None
+
+ data = read_json_file(TIMEOUT_PATH)
+ data['deadline'] = datetime.datetime.strptime(data['deadline'], '%Y-%m-%dT%H:%M:%SZ')
+
+ return data
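+
+
+# Illustrative only (not part of the original module): the JSON written to
+# TIMEOUT_PATH by set_timeout() and parsed back above, with hypothetical values:
+#
+#   {"duration": 45, "deadline": "2024-05-14T20:45:00Z"}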
+
+
+def configure_timeout(args):
+ """
+ :type args: CommonConfig
+ """
+ if isinstance(args, TestConfig):
+ configure_test_timeout(args) # only tests are subject to the timeout
+
+
+def configure_test_timeout(args):
+ """
+ :type args: TestConfig
+ """
+ timeout = get_timeout()
+
+ if not timeout:
+ return
+
+ timeout_start = datetime.datetime.utcnow()
+ timeout_duration = timeout['duration']
+ timeout_deadline = timeout['deadline']
+ timeout_remaining = timeout_deadline - timeout_start
+
+ test_timeout = TestTimeout(timeout_duration)
+
+ if timeout_remaining <= datetime.timedelta():
+ test_timeout.write(args)
+
+ raise ApplicationError('The %d minute test timeout expired %s ago at %s.' % (
+ timeout_duration, timeout_remaining * -1, timeout_deadline))
+
+ display.info('The %d minute test timeout expires in %s at %s.' % (
+ timeout_duration, timeout_remaining, timeout_deadline), verbosity=1)
+
+ def timeout_handler(_dummy1, _dummy2):
+ """Runs when SIGUSR1 is received."""
+ test_timeout.write(args)
+
+ raise ApplicationError('Tests aborted after exceeding the %d minute time limit.' % timeout_duration)
+
+ def timeout_waiter(timeout_seconds):
+ """
+ :type timeout_seconds: int
+ """
+ time.sleep(timeout_seconds)
+ os.kill(os.getpid(), signal.SIGUSR1)
+
+ signal.signal(signal.SIGUSR1, timeout_handler)
+
+ instance = WrappedThread(functools.partial(timeout_waiter, int(timeout_remaining.total_seconds())))
+ instance.daemon = True
+ instance.start()
+
+
+def show_dict(data, verbose, root_verbosity=0, path=None):
+ """
+ :type data: dict[str, any]
+ :type verbose: dict[str, int]
+ :type root_verbosity: int
+ :type path: list[str] | None
+ """
+ path = path if path else []
+
+ for key, value in sorted(data.items()):
+ indent = ' ' * len(path)
+ key_path = path + [key]
+ key_name = '.'.join(key_path)
+ verbosity = verbose.get(key_name, root_verbosity)
+
+ if isinstance(value, (tuple, list)):
+ display.info(indent + '%s:' % key, verbosity=verbosity)
+ for item in value:
+ display.info(indent + ' - %s' % item, verbosity=verbosity)
+ elif isinstance(value, dict):
+ min_verbosity = min([verbosity] + [v for k, v in verbose.items() if k.startswith('%s.' % key)])
+ display.info(indent + '%s:' % key, verbosity=min_verbosity)
+ show_dict(value, verbose, verbosity, key_path)
+ else:
+ display.info(indent + '%s: %s' % (key, value), verbosity=verbosity)
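+
+
+# Illustrative only (not part of the original module): dotted key paths in the
+# verbose map override the verbosity inherited from the parent. With hypothetical
+# values:
+#
+#   show_dict({'docker': {'executable': '/usr/bin/docker'}},
+#             {'docker': 3, 'docker.executable': 0})
+#
+# prints the 'docker:' header and the 'executable' entry at verbosity 0, while the
+# rest of the 'docker' subtree would require verbosity 3.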
+
+
+def get_docker_details(args):
+ """
+ :type args: CommonConfig
+ :rtype: dict[str, any]
+ """
+ docker = find_executable('docker', required=False)
+ info = None
+ version = None
+
+ if docker:
+ try:
+ info = docker_info(args)
+ except SubprocessError as ex:
+ display.warning('Failed to collect docker info:\n%s' % ex)
+
+ try:
+ version = docker_version(args)
+ except SubprocessError as ex:
+ display.warning('Failed to collect docker version:\n%s' % ex)
+
+ docker_details = dict(
+ executable=docker,
+ info=info,
+ version=version,
+ )
+
+ return docker_details
diff --git a/test/lib/ansible_test/_internal/executor.py b/test/lib/ansible_test/_internal/executor.py
new file mode 100644
index 00000000..4f613049
--- /dev/null
+++ b/test/lib/ansible_test/_internal/executor.py
@@ -0,0 +1,2146 @@
+"""Execute Ansible tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import datetime
+import re
+import time
+import textwrap
+import functools
+import hashlib
+import difflib
+import filecmp
+import random
+import string
+import shutil
+
+from . import types as t
+
+from .thread import (
+ WrappedThread,
+)
+
+from .core_ci import (
+ AnsibleCoreCI,
+ SshKey,
+)
+
+from .manage_ci import (
+ ManageWindowsCI,
+ ManageNetworkCI,
+)
+
+from .cloud import (
+ cloud_filter,
+ cloud_init,
+ get_cloud_environment,
+ get_cloud_platforms,
+ CloudEnvironmentConfig,
+)
+
+from .io import (
+ make_dirs,
+ open_text_file,
+ read_binary_file,
+ read_text_file,
+ write_text_file,
+)
+
+from .util import (
+ ApplicationWarning,
+ ApplicationError,
+ SubprocessError,
+ display,
+ remove_tree,
+ find_executable,
+ raw_command,
+ get_available_port,
+ generate_pip_command,
+ find_python,
+ cmd_quote,
+ ANSIBLE_LIB_ROOT,
+ ANSIBLE_TEST_DATA_ROOT,
+ ANSIBLE_TEST_CONFIG_ROOT,
+ get_ansible_version,
+ tempdir,
+ open_zipfile,
+ SUPPORTED_PYTHON_VERSIONS,
+ str_to_version,
+ version_to_str,
+)
+
+from .util_common import (
+ get_docker_completion,
+ get_network_settings,
+ get_remote_completion,
+ get_python_path,
+ intercept_command,
+ named_temporary_file,
+ run_command,
+ write_json_test_results,
+ ResultType,
+ handle_layout_messages,
+)
+
+from .docker_util import (
+ docker_pull,
+ docker_run,
+ docker_available,
+ docker_rm,
+ get_docker_container_id,
+ get_docker_container_ip,
+ get_docker_hostname,
+ get_docker_preferred_network_name,
+ is_docker_user_defined_network,
+)
+
+from .ansible_util import (
+ ansible_environment,
+ check_pyyaml,
+)
+
+from .target import (
+ IntegrationTarget,
+ walk_internal_targets,
+ walk_posix_integration_targets,
+ walk_network_integration_targets,
+ walk_windows_integration_targets,
+ TIntegrationTarget,
+)
+
+from .ci import (
+ get_ci_provider,
+)
+
+from .classification import (
+ categorize_changes,
+)
+
+from .config import (
+ TestConfig,
+ EnvironmentConfig,
+ IntegrationConfig,
+ NetworkIntegrationConfig,
+ PosixIntegrationConfig,
+ ShellConfig,
+ WindowsIntegrationConfig,
+ TIntegrationConfig,
+)
+
+from .metadata import (
+ ChangeDescription,
+)
+
+from .integration import (
+ integration_test_environment,
+ integration_test_config_file,
+ setup_common_temp_dir,
+ get_inventory_relative_path,
+ check_inventory,
+ delegate_inventory,
+)
+
+from .data import (
+ data_context,
+)
+
+HTTPTESTER_HOSTS = (
+ 'ansible.http.tests',
+ 'sni1.ansible.http.tests',
+ 'fail.ansible.http.tests',
+)
+
+
+def check_startup():
+ """Checks to perform at startup before running commands."""
+ check_legacy_modules()
+
+
+def check_legacy_modules():
+ """Detect conflicts with legacy core/extras module directories to avoid problems later."""
+ for directory in 'core', 'extras':
+ path = 'lib/ansible/modules/%s' % directory
+
+ for root, _dir_names, file_names in os.walk(path):
+ if file_names:
+ # the directory shouldn't exist, but if it does, it must contain no files
+ raise ApplicationError('Files prohibited in "%s". '
+ 'These are most likely legacy modules from version 2.2 or earlier.' % root)
+
+
+def create_shell_command(command):
+ """
+ :type command: list[str]
+ :rtype: list[str]
+ """
+ optional_vars = (
+ 'TERM',
+ )
+
+ cmd = ['/usr/bin/env']
+ cmd += ['%s=%s' % (var, os.environ[var]) for var in optional_vars if var in os.environ]
+ cmd += command
+
+ return cmd
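+
+
+# Illustrative only (not part of the original module): with TERM=xterm exported,
+# the helper above yields:
+#
+#   create_shell_command(['bash', '-i']) == ['/usr/bin/env', 'TERM=xterm', 'bash', '-i']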
+
+
+def get_openssl_version(args, python, python_version): # type: (EnvironmentConfig, str, str) -> t.Optional[t.Tuple[int, ...]]
+ """Return the openssl version."""
+ if not python_version.startswith('2.'):
+ # OpenSSL version checking only works on Python 3.x.
+ # This should be the most accurate, since it is the Python we will be using.
+ version = json.loads(run_command(args, [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sslcheck.py')], capture=True, always=True)[0])['version']
+
+ if version:
+ display.info('Detected OpenSSL version %s under Python %s.' % (version_to_str(version), python_version), verbosity=1)
+
+ return tuple(version)
+
+ # Fall back to detecting the OpenSSL version from the CLI.
+ # This should provide an adequate solution on Python 2.x.
+ openssl_path = find_executable('openssl', required=False)
+
+ if openssl_path:
+ try:
+ result = raw_command([openssl_path, 'version'], capture=True)[0]
+ except SubprocessError:
+ result = ''
+
+ match = re.search(r'^OpenSSL (?P<version>[0-9]+\.[0-9]+\.[0-9]+)', result)
+
+ if match:
+ version = str_to_version(match.group('version'))
+
+ display.info('Detected OpenSSL version %s using the openssl CLI.' % version_to_str(version), verbosity=1)
+
+ return version
+
+ display.info('Unable to detect OpenSSL version.', verbosity=1)
+
+ return None
+
+
+def get_setuptools_version(args, python): # type: (EnvironmentConfig, str) -> t.Tuple[int, ...]
+ """Return the setuptools version for the given python."""
+ try:
+ return str_to_version(raw_command([python, '-c', 'import setuptools; print(setuptools.__version__)'], capture=True)[0])
+ except SubprocessError:
+ if args.explain:
+ return tuple() # ignore errors in explain mode in case setuptools is not already installed
+
+ raise
+
+
+def get_cryptography_requirement(args, python_version): # type: (EnvironmentConfig, str) -> str
+ """
+ Return the correct cryptography requirement for the given python version.
+ The version of cryptography installed depends on the python version, setuptools version and openssl version.
+ """
+ python = find_python(python_version)
+ setuptools_version = get_setuptools_version(args, python)
+ openssl_version = get_openssl_version(args, python, python_version)
+
+ if setuptools_version >= (18, 5):
+ if python_version == '2.6':
+ # cryptography 2.2+ requires python 2.7+
+ # see https://github.com/pyca/cryptography/blob/master/CHANGELOG.rst#22---2018-03-19
+ cryptography = 'cryptography < 2.2'
+ elif openssl_version and openssl_version < (1, 1, 0):
+ # cryptography 3.2 requires openssl 1.1.x or later
+ # see https://cryptography.io/en/latest/changelog.html#v3-2
+ cryptography = 'cryptography < 3.2'
+ else:
+ # cryptography 3.4+ fails to install on many systems
+ # this is a temporary work-around until a more permanent solution is available
+ cryptography = 'cryptography < 3.4'
+ else:
+ # cryptography 2.1+ requires setuptools 18.5+
+ # see https://github.com/pyca/cryptography/blob/62287ae18383447585606b9d0765c0f1b8a9777c/setup.py#L26
+ cryptography = 'cryptography < 2.1'
+
+ return cryptography
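+
+
+# Illustrative only (not part of the original module): hypothetical inputs and the
+# constraint chosen by the logic above:
+#
+#   setuptools 12.0, any OpenSSL, any Python     -> 'cryptography < 2.1'
+#   setuptools 40.0, any OpenSSL, Python 2.6     -> 'cryptography < 2.2'
+#   setuptools 40.0, OpenSSL 1.0.2, Python 2.7   -> 'cryptography < 3.2'
+#   setuptools 40.0, OpenSSL 1.1.1, Python 3.6   -> 'cryptography < 3.4'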
+
+
+def install_command_requirements(args, python_version=None, context=None, enable_pyyaml_check=False):
+ """
+ :type args: EnvironmentConfig
+ :type python_version: str | None
+ :type context: str | None
+ :type enable_pyyaml_check: bool
+ """
+ if not args.explain:
+ make_dirs(ResultType.COVERAGE.path)
+ make_dirs(ResultType.DATA.path)
+
+ if isinstance(args, ShellConfig):
+ if args.raw:
+ return
+
+ if not args.requirements:
+ return
+
+ if isinstance(args, ShellConfig):
+ return
+
+ packages = []
+
+ if isinstance(args, TestConfig):
+ if args.coverage:
+ packages.append('coverage')
+ if args.junit:
+ packages.append('junit-xml')
+
+ if not python_version:
+ python_version = args.python_version
+
+ pip = generate_pip_command(find_python(python_version))
+
+ # skip packages which have already been installed for python_version
+
+ try:
+ package_cache = install_command_requirements.package_cache
+ except AttributeError:
+ package_cache = install_command_requirements.package_cache = {}
+
+ installed_packages = package_cache.setdefault(python_version, set())
+ skip_packages = [package for package in packages if package in installed_packages]
+
+ for package in skip_packages:
+ packages.remove(package)
+
+ installed_packages.update(packages)
+
+ if args.command != 'sanity':
+ install_ansible_test_requirements(args, pip)
+
+ # make sure setuptools is available before trying to install cryptography
+ # the installed version of setuptools affects the version of cryptography to install
+ run_command(args, generate_pip_install(pip, '', packages=['setuptools']))
+
+ # install the latest cryptography version that the current requirements can support
+ # use a custom constraints file to avoid the normal constraints file overriding the chosen version of cryptography
+ # if not installed here later install commands may try to install an unsupported version due to the presence of older setuptools
+ # this is done instead of upgrading setuptools to allow tests to function with older distribution provided versions of setuptools
+ run_command(args, generate_pip_install(pip, '',
+ packages=[get_cryptography_requirement(args, python_version)],
+ constraints=os.path.join(ANSIBLE_TEST_DATA_ROOT, 'cryptography-constraints.txt')))
+
+ commands = [generate_pip_install(pip, args.command, packages=packages, context=context)]
+
+ if isinstance(args, IntegrationConfig):
+ for cloud_platform in get_cloud_platforms(args):
+ commands.append(generate_pip_install(pip, '%s.cloud.%s' % (args.command, cloud_platform)))
+
+ commands = [cmd for cmd in commands if cmd]
+
+ if not commands:
+ return # no need to detect changes or run pip check since we are not making any changes
+
+ # only look for changes when more than one requirements file is needed
+ detect_pip_changes = len(commands) > 1
+
+ # first pass to install requirements, changes expected unless environment is already set up
+ changes = run_pip_commands(args, pip, commands, detect_pip_changes)
+
+ if changes:
+ # second pass to check for conflicts in requirements, changes are not expected here
+ changes = run_pip_commands(args, pip, commands, detect_pip_changes)
+
+ if changes:
+ raise ApplicationError('Conflicts detected in requirements. The following commands reported changes during verification:\n%s' %
+ '\n'.join((' '.join(cmd_quote(c) for c in cmd) for cmd in changes)))
+
+ if args.pip_check:
+ # ask pip to check for conflicts between installed packages
+ try:
+ run_command(args, pip + ['check', '--disable-pip-version-check'], capture=True)
+ except SubprocessError as ex:
+ if ex.stderr.strip() == 'ERROR: unknown command "check"':
+ display.warning('Cannot check pip requirements for conflicts because "pip check" is not supported.')
+ else:
+ raise
+
+ if enable_pyyaml_check:
+ # pyyaml may have been one of the requirements that was installed, so perform an optional check for it
+ check_pyyaml(args, python_version, required=False)
+
+
+def install_ansible_test_requirements(args, pip): # type: (EnvironmentConfig, t.List[str]) -> None
+ """Install requirements for ansible-test for the given pip if not already installed."""
+ try:
+ installed = install_command_requirements.installed
+ except AttributeError:
+ installed = install_command_requirements.installed = set()
+
+ if tuple(pip) in installed:
+ return
+
+ # make sure basic ansible-test requirements are met, including making sure that pip is recent enough to support constraints
+ # virtualenvs created by older distributions may include very old pip versions, such as those created in the centos6 test container (pip 6.0.8)
+ run_command(args, generate_pip_install(pip, 'ansible-test', use_constraints=False))
+
+ installed.add(tuple(pip))
+
+
+def run_pip_commands(args, pip, commands, detect_pip_changes=False):
+ """
+ :type args: EnvironmentConfig
+ :type pip: list[str]
+ :type commands: list[list[str]]
+ :type detect_pip_changes: bool
+ :rtype: list[list[str]]
+ """
+ changes = []
+
+ after_list = pip_list(args, pip) if detect_pip_changes else None
+
+ for cmd in commands:
+ if not cmd:
+ continue
+
+ before_list = after_list
+
+ run_command(args, cmd)
+
+ after_list = pip_list(args, pip) if detect_pip_changes else None
+
+ if before_list != after_list:
+ changes.append(cmd)
+
+ return changes
+
+
+def pip_list(args, pip):
+ """
+ :type args: EnvironmentConfig
+ :type pip: list[str]
+ :rtype: str
+ """
+ stdout = run_command(args, pip + ['list'], capture=True)[0]
+ return stdout
+
+
+def generate_pip_install(pip, command, packages=None, constraints=None, use_constraints=True, context=None):
+ """
+ :type pip: list[str]
+ :type command: str
+ :type packages: list[str] | None
+ :type constraints: str | None
+ :type use_constraints: bool
+ :type context: str | None
+ :rtype: list[str] | None
+ """
+ constraints = constraints or os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'constraints.txt')
+ requirements = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', '%s.txt' % ('%s.%s' % (command, context) if context else command))
+ content_constraints = None
+
+ options = []
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ if command == 'sanity' and data_context().content.is_ansible:
+ requirements = os.path.join(data_context().content.sanity_path, 'code-smell', '%s.requirements.txt' % context)
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ if command == 'units':
+ requirements = os.path.join(data_context().content.unit_path, 'requirements.txt')
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ content_constraints = os.path.join(data_context().content.unit_path, 'constraints.txt')
+
+ if command in ('integration', 'windows-integration', 'network-integration'):
+ requirements = os.path.join(data_context().content.integration_path, 'requirements.txt')
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ requirements = os.path.join(data_context().content.integration_path, '%s.requirements.txt' % command)
+
+ if os.path.exists(requirements) and os.path.getsize(requirements):
+ options += ['-r', requirements]
+
+ content_constraints = os.path.join(data_context().content.integration_path, 'constraints.txt')
+
+ if command.startswith('integration.cloud.'):
+ content_constraints = os.path.join(data_context().content.integration_path, 'constraints.txt')
+
+ if packages:
+ options += packages
+
+ if not options:
+ return None
+
+ if use_constraints:
+ if content_constraints and os.path.exists(content_constraints) and os.path.getsize(content_constraints):
+ # listing content constraints first gives them priority over constraints provided by ansible-test
+ options.extend(['-c', content_constraints])
+
+ options.extend(['-c', constraints])
+
+ return pip + ['install', '--disable-pip-version-check'] + options
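+
+
+# Illustrative only (not part of the original module): a hypothetical result for
+# command='units' when both requirements files exist, showing that content
+# constraints are listed before the ansible-test constraints:
+#
+#   pip + ['install', '--disable-pip-version-check',
+#          '-r', '<ANSIBLE_TEST_DATA_ROOT>/requirements/units.txt',
+#          '-r', 'test/units/requirements.txt',
+#          '-c', 'test/units/constraints.txt',
+#          '-c', '<ANSIBLE_TEST_DATA_ROOT>/requirements/constraints.txt']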
+
+
+def command_shell(args):
+ """
+ :type args: ShellConfig
+ """
+ if args.delegate:
+ raise Delegate()
+
+ install_command_requirements(args)
+
+ if args.inject_httptester:
+ inject_httptester(args)
+
+ cmd = create_shell_command(['bash', '-i'])
+ run_command(args, cmd)
+
+
+def command_posix_integration(args):
+ """
+ :type args: PosixIntegrationConfig
+ """
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ inventory_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, os.path.basename(inventory_relative_path))
+
+ all_targets = tuple(walk_posix_integration_targets(include_hidden=True))
+ internal_targets = command_integration_filter(args, all_targets)
+ command_integration_filtered(args, internal_targets, all_targets, inventory_path)
+
+
+def command_network_integration(args):
+ """
+ :type args: NetworkIntegrationConfig
+ """
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
+
+ if args.inventory:
+ inventory_path = os.path.join(data_context().content.root, data_context().content.integration_path, args.inventory)
+ else:
+ inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
+
+ if args.no_temp_workdir:
+ # temporary solution to keep DCI tests working
+ inventory_exists = os.path.exists(inventory_path)
+ else:
+ inventory_exists = os.path.isfile(inventory_path)
+
+ if not args.explain and not args.platform and not inventory_exists:
+ raise ApplicationError(
+ 'Inventory not found: %s\n'
+ 'Use --inventory to specify the inventory path.\n'
+ 'Use --platform to provision resources and generate an inventory file.\n'
+ 'See also inventory template: %s' % (inventory_path, template_path)
+ )
+
+ check_inventory(args, inventory_path)
+ delegate_inventory(args, inventory_path)
+
+ all_targets = tuple(walk_network_integration_targets(include_hidden=True))
+ internal_targets = command_integration_filter(args, all_targets, init_callback=network_init)
+ instances = [] # type: t.List[WrappedThread]
+
+ if args.platform:
+ get_python_path(args, args.python_executable) # initialize before starting threads
+
+ configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)
+
+ for platform_version in args.platform:
+ platform, version = platform_version.split('/', 1)
+ config = configs.get(platform_version)
+
+ if not config:
+ continue
+
+ instance = WrappedThread(functools.partial(network_run, args, platform, version, config))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ remotes = [instance.wait_for_result() for instance in instances]
+ inventory = network_inventory(remotes)
+
+ display.info('>>> Inventory: %s\n%s' % (inventory_path, inventory.strip()), verbosity=3)
+
+ if not args.explain:
+ write_text_file(inventory_path, inventory)
+
+ success = False
+
+ try:
+ command_integration_filtered(args, internal_targets, all_targets, inventory_path)
+ success = True
+ finally:
+ if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
+ for instance in instances:
+ instance.result.stop()
+
+
+def network_init(args, internal_targets): # type: (NetworkIntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> None
+ """Initialize platforms for network integration tests."""
+ if not args.platform:
+ return
+
+ if args.metadata.instance_config is not None:
+ return
+
+ platform_targets = set(a for target in internal_targets for a in target.aliases if a.startswith('network/'))
+
+ instances = [] # type: t.List[WrappedThread]
+
+ # generate an ssh key (if needed) up front once, instead of for each instance
+ SshKey(args)
+
+ for platform_version in args.platform:
+ platform, version = platform_version.split('/', 1)
+ platform_target = 'network/%s/' % platform
+
+ if platform_target not in platform_targets:
+ display.warning('Skipping "%s" because selected tests do not target the "%s" platform.' % (
+ platform_version, platform))
+ continue
+
+ instance = WrappedThread(functools.partial(network_start, args, platform, version))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ args.metadata.instance_config = [instance.wait_for_result() for instance in instances]
+
+
+def network_start(args, platform, version):
+ """
+ :type args: NetworkIntegrationConfig
+ :type platform: str
+ :type version: str
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider)
+ core_ci.start()
+
+ return core_ci.save()
+
+
+def network_run(args, platform, version, config):
+ """
+ :type args: NetworkIntegrationConfig
+ :type platform: str
+ :type version: str
+ :type config: dict[str, str]
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, platform, version, stage=args.remote_stage, provider=args.remote_provider, load=False)
+ core_ci.load(config)
+ core_ci.wait()
+
+ manage = ManageNetworkCI(core_ci)
+ manage.wait()
+
+ return core_ci
+
+
+def network_inventory(remotes):
+ """
+ :type remotes: list[AnsibleCoreCI]
+ :rtype: str
+ """
+ groups = dict([(remote.platform, []) for remote in remotes])
+ net = []
+
+ for remote in remotes:
+ options = dict(
+ ansible_host=remote.connection.hostname,
+ ansible_user=remote.connection.username,
+ ansible_ssh_private_key_file=os.path.abspath(remote.ssh_key.key),
+ )
+
+ settings = get_network_settings(remote.args, remote.platform, remote.version)
+
+ options.update(settings.inventory_vars)
+
+ groups[remote.platform].append(
+ '%s %s' % (
+ remote.name.replace('.', '-'),
+ ' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
+ )
+ )
+
+ net.append(remote.platform)
+
+ groups['net:children'] = net
+
+ template = ''
+
+ for group in groups:
+ hosts = '\n'.join(groups[group])
+
+ template += textwrap.dedent("""
+ [%s]
+ %s
+ """) % (group, hosts)
+
+ inventory = template
+
+ return inventory
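+
+
+# Illustrative only (not part of the original module): hypothetical output for a
+# single vyos instance; the host values and inventory_vars shown are invented:
+#
+#   [vyos]
+#   vyos-1-1-8 ansible_host="198.51.100.5" ansible_network_os="vyos" ansible_user="ansible" ...
+#
+#   [net:children]
+#   vyos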
+
+
+def command_windows_integration(args):
+ """
+ :type args: WindowsIntegrationConfig
+ """
+ handle_layout_messages(data_context().content.integration_messages)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, os.path.basename(inventory_relative_path)) + '.template'
+
+ if args.inventory:
+ inventory_path = os.path.join(data_context().content.root, data_context().content.integration_path, args.inventory)
+ else:
+ inventory_path = os.path.join(data_context().content.root, inventory_relative_path)
+
+ if not args.explain and not args.windows and not os.path.isfile(inventory_path):
+ raise ApplicationError(
+ 'Inventory not found: %s\n'
+ 'Use --inventory to specify the inventory path.\n'
+ 'Use --windows to provision resources and generate an inventory file.\n'
+ 'See also inventory template: %s' % (inventory_path, template_path)
+ )
+
+ check_inventory(args, inventory_path)
+ delegate_inventory(args, inventory_path)
+
+ all_targets = tuple(walk_windows_integration_targets(include_hidden=True))
+ internal_targets = command_integration_filter(args, all_targets, init_callback=windows_init)
+ instances = [] # type: t.List[WrappedThread]
+ pre_target = None
+ post_target = None
+ httptester_id = None
+
+ if args.windows:
+ get_python_path(args, args.python_executable) # initialize before starting threads
+
+ configs = dict((config['platform_version'], config) for config in args.metadata.instance_config)
+
+ for version in args.windows:
+ config = configs['windows/%s' % version]
+
+ instance = WrappedThread(functools.partial(windows_run, args, version, config))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ remotes = [instance.wait_for_result() for instance in instances]
+ inventory = windows_inventory(remotes)
+
+ display.info('>>> Inventory: %s\n%s' % (inventory_path, inventory.strip()), verbosity=3)
+
+ if not args.explain:
+ write_text_file(inventory_path, inventory)
+
+ use_httptester = args.httptester and any('needs/httptester/' in target.aliases for target in internal_targets)
+ # if running under Docker delegation, the httptester may have already been started
+ docker_httptester = bool(os.environ.get("HTTPTESTER", False))
+
+ if use_httptester and not docker_available() and not docker_httptester:
+ display.warning('Assuming --disable-httptester since `docker` is not available.')
+ elif use_httptester:
+ if docker_httptester:
+ # we are running in a Docker container that is linked to the httptester container, we just need to
+ # forward these requests to the linked hostname
+ first_host = HTTPTESTER_HOSTS[0]
+ ssh_options = ["-R", "8080:%s:80" % first_host, "-R", "8443:%s:443" % first_host]
+ else:
+ # we are running directly and need to start the httptester container ourselves and forward the ports
+ # from there; manually set inject_httptester so the HTTPTESTER env var is set during the run
+ args.inject_httptester = True
+ httptester_id, ssh_options = start_httptester(args)
+
+ # to get this SSH command to run in the background we need to fork into the background (-f) and disable
+ # pty allocation (-T)
+ ssh_options.insert(0, "-fT")
+
+ # create a script that will continue to run in the background until the script is deleted; this will
+ # clean up and close the connection
+ def forward_ssh_ports(target):
+ """
+ :type target: IntegrationTarget
+ """
+ if 'needs/httptester/' not in target.aliases:
+ return
+
+ for remote in [r for r in remotes if r.version != '2008']:
+ manage = ManageWindowsCI(remote)
+ manage.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'windows-httptester.ps1'), watcher_path)
+
+ # We cannot pass an array of strings with -File so we just use a delimiter for multiple values
+ script = "powershell.exe -NoProfile -ExecutionPolicy Bypass -File .\\%s -Hosts \"%s\"" \
+ % (watcher_path, "|".join(HTTPTESTER_HOSTS))
+ if args.verbosity > 3:
+ script += " -Verbose"
+ manage.ssh(script, options=ssh_options, force_pty=False)
+
+ def cleanup_ssh_ports(target):
+ """
+ :type target: IntegrationTarget
+ """
+ if 'needs/httptester/' not in target.aliases:
+ return
+
+ for remote in [r for r in remotes if r.version != '2008']:
+ # delete the tmp file that keeps the http-tester alive
+ manage = ManageWindowsCI(remote)
+ manage.ssh("cmd.exe /c \"del %s /F /Q\"" % watcher_path, force_pty=False)
+
+ watcher_path = "ansible-test-http-watcher-%s.ps1" % time.time()
+ pre_target = forward_ssh_ports
+ post_target = cleanup_ssh_ports
+
+ def run_playbook(playbook, run_playbook_vars): # type: (str, t.Dict[str, t.Any]) -> None
+ playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook)
+ command = ['ansible-playbook', '-i', inventory_path, playbook_path, '-e', json.dumps(run_playbook_vars)]
+ if args.verbosity:
+ command.append('-%s' % ('v' * args.verbosity))
+
+ env = ansible_environment(args)
+ intercept_command(args, command, '', env, disable_coverage=True)
+
+ remote_temp_path = None
+
+ if args.coverage and not args.coverage_check:
+ # Create the remote directory that is writable by everyone. Use Ansible to talk to the remote host.
+ remote_temp_path = 'C:\\ansible_test_coverage_%s' % time.time()
+ playbook_vars = {'remote_temp_path': remote_temp_path}
+ run_playbook('windows_coverage_setup.yml', playbook_vars)
+
+ success = False
+
+ try:
+ command_integration_filtered(args, internal_targets, all_targets, inventory_path, pre_target=pre_target,
+ post_target=post_target, remote_temp_path=remote_temp_path)
+ success = True
+ finally:
+ if httptester_id:
+ docker_rm(args, httptester_id)
+
+ if remote_temp_path:
+ # Zip up the coverage files that were generated and fetch it back to localhost.
+ with tempdir() as local_temp_path:
+ playbook_vars = {'remote_temp_path': remote_temp_path, 'local_temp_path': local_temp_path}
+ run_playbook('windows_coverage_teardown.yml', playbook_vars)
+
+ for filename in os.listdir(local_temp_path):
+ with open_zipfile(os.path.join(local_temp_path, filename)) as coverage_zip:
+ coverage_zip.extractall(ResultType.COVERAGE.path)
+
+ if args.remote_terminate == 'always' or (args.remote_terminate == 'success' and success):
+ for instance in instances:
+ instance.result.stop()
+
+
+# noinspection PyUnusedLocal
+def windows_init(args, internal_targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: WindowsIntegrationConfig
+ :type internal_targets: tuple[IntegrationTarget]
+ """
+ if not args.windows:
+ return
+
+ if args.metadata.instance_config is not None:
+ return
+
+ instances = [] # type: t.List[WrappedThread]
+
+ for version in args.windows:
+ instance = WrappedThread(functools.partial(windows_start, args, version))
+ instance.daemon = True
+ instance.start()
+ instances.append(instance)
+
+ while any(instance.is_alive() for instance in instances):
+ time.sleep(1)
+
+ args.metadata.instance_config = [instance.wait_for_result() for instance in instances]
+
+
+def windows_start(args, version):
+ """
+ :type args: WindowsIntegrationConfig
+ :type version: str
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider)
+ core_ci.start()
+
+ return core_ci.save()
+
+
+def windows_run(args, version, config):
+ """
+ :type args: WindowsIntegrationConfig
+ :type version: str
+ :type config: dict[str, str]
+ :rtype: AnsibleCoreCI
+ """
+ core_ci = AnsibleCoreCI(args, 'windows', version, stage=args.remote_stage, provider=args.remote_provider, load=False)
+ core_ci.load(config)
+ core_ci.wait()
+
+ manage = ManageWindowsCI(core_ci)
+ manage.wait()
+
+ return core_ci
+
+
+def windows_inventory(remotes):
+ """
+ :type remotes: list[AnsibleCoreCI]
+ :rtype: str
+ """
+ hosts = []
+
+ for remote in remotes:
+ options = dict(
+ ansible_host=remote.connection.hostname,
+ ansible_user=remote.connection.username,
+ ansible_password=remote.connection.password,
+ ansible_port=remote.connection.port,
+ )
+
+ # used for the connection_windows_ssh test target
+ if remote.ssh_key:
+ options["ansible_ssh_private_key_file"] = os.path.abspath(remote.ssh_key.key)
+
+ if remote.name == 'windows-2008':
+ options.update(
+ # force 2008 to use PSRP for the connection plugin
+ ansible_connection='psrp',
+ ansible_psrp_auth='basic',
+ ansible_psrp_cert_validation='ignore',
+ )
+ elif remote.name == 'windows-2016':
+ options.update(
+ # force 2016 to use NTLM + HTTP message encryption
+ ansible_connection='winrm',
+ ansible_winrm_server_cert_validation='ignore',
+ ansible_winrm_transport='ntlm',
+ ansible_winrm_scheme='http',
+ ansible_port='5985',
+ )
+ else:
+ options.update(
+ ansible_connection='winrm',
+ ansible_winrm_server_cert_validation='ignore',
+ )
+
+ hosts.append(
+ '%s %s' % (
+ remote.name.replace('/', '_'),
+ ' '.join('%s="%s"' % (k, options[k]) for k in sorted(options)),
+ )
+ )
+
+ template = """
+ [windows]
+ %s
+
+ # support winrm binary module tests (temporary solution)
+ [testhost:children]
+ windows
+ """
+
+ template = textwrap.dedent(template)
+ inventory = template % ('\n'.join(hosts))
+
+ return inventory
+
+
+def command_integration_filter(args, # type: TIntegrationConfig
+ targets, # type: t.Iterable[TIntegrationTarget]
+ init_callback=None, # type: t.Callable[[TIntegrationConfig, t.Tuple[TIntegrationTarget, ...]], None]
+ ): # type: (...) -> t.Tuple[TIntegrationTarget, ...]
+ """Filter the given integration test targets."""
+ targets = tuple(target for target in targets if 'hidden/' not in target.aliases)
+ changes = get_changes_filter(args)
+
+ # special behavior when the --changed-all-target target is selected based on changes
+ if args.changed_all_target in changes:
+ # act as though the --changed-all-target target was in the include list
+ if args.changed_all_mode == 'include' and args.changed_all_target not in args.include:
+ args.include.append(args.changed_all_target)
+ args.delegate_args += ['--include', args.changed_all_target]
+ # act as though the --changed-all-target target was in the exclude list
+ elif args.changed_all_mode == 'exclude' and args.changed_all_target not in args.exclude:
+ args.exclude.append(args.changed_all_target)
+
+ require = args.require + changes
+ exclude = args.exclude
+
+ internal_targets = walk_internal_targets(targets, args.include, exclude, require)
+ environment_exclude = get_integration_filter(args, internal_targets)
+
+ environment_exclude += cloud_filter(args, internal_targets)
+
+ if environment_exclude:
+ exclude += environment_exclude
+ internal_targets = walk_internal_targets(targets, args.include, exclude, require)
+
+ if not internal_targets:
+ raise AllTargetsSkipped()
+
+ if args.start_at and not any(target.name == args.start_at for target in internal_targets):
+ raise ApplicationError('Start at target matches nothing: %s' % args.start_at)
+
+ if init_callback:
+ init_callback(args, internal_targets)
+
+ cloud_init(args, internal_targets)
+
+ vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+
+ if os.path.exists(vars_file_src):
+ def integration_config_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """
+ Add the integration config vars file to the payload file list.
+ This will preserve the file during delegation even if the file is ignored by source control.
+ """
+ files.append((vars_file_src, data_context().content.integration_vars_path))
+
+ data_context().register_payload_callback(integration_config_callback)
+
+ if args.delegate:
+ raise Delegate(require=require, exclude=exclude, integration_targets=internal_targets)
+
+ install_command_requirements(args)
+
+ return internal_targets
+
+
+def command_integration_filtered(args, targets, all_targets, inventory_path, pre_target=None, post_target=None,
+ remote_temp_path=None):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :type all_targets: tuple[IntegrationTarget]
+ :type inventory_path: str
+ :type pre_target: (IntegrationTarget) -> None | None
+ :type post_target: (IntegrationTarget) -> None | None
+ :type remote_temp_path: str | None
+ """
+ found = False
+ passed = []
+ failed = []
+
+ targets_iter = iter(targets)
+ all_targets_dict = dict((target.name, target) for target in all_targets)
+
+ setup_errors = []
+ setup_targets_executed = set()
+
+ for target in all_targets:
+ for setup_target in target.setup_once + target.setup_always:
+ if setup_target not in all_targets_dict:
+ setup_errors.append('Target "%s" contains invalid setup target: %s' % (target.name, setup_target))
+
+ if setup_errors:
+ raise ApplicationError('Found %d invalid setup aliases:\n%s' % (len(setup_errors), '\n'.join(setup_errors)))
+
+ check_pyyaml(args, args.python_version)
+
+ test_dir = os.path.join(ResultType.TMP.path, 'output_dir')
+
+ if not args.explain and any('needs/ssh/' in target.aliases for target in targets):
+ max_tries = 20
+ display.info('SSH service required for tests. Checking to make sure we can connect.')
+ for i in range(1, max_tries + 1):
+ try:
+ run_command(args, ['ssh', '-o', 'BatchMode=yes', 'localhost', 'id'], capture=True)
+ display.info('SSH service responded.')
+ break
+ except SubprocessError:
+ if i == max_tries:
+ raise
+ seconds = 3
+ display.warning('SSH service not responding. Waiting %d second(s) before checking again.' % seconds)
+ time.sleep(seconds)
+
+ # Windows is different as Ansible execution is done locally but the host is remote
+ if args.inject_httptester and not isinstance(args, WindowsIntegrationConfig):
+ inject_httptester(args)
+
+ start_at_task = args.start_at_task
+
+ results = {}
+
+ current_environment = None # type: t.Optional[EnvironmentDescription]
+
+ # common temporary directory path that will be valid on both the controller and the remote
+ # it must be common because it will be referenced in environment variables that are shared across multiple hosts
+ common_temp_path = '/tmp/ansible-test-%s' % ''.join(random.choice(string.ascii_letters + string.digits) for _idx in range(8))
+
+ setup_common_temp_dir(args, common_temp_path)
+
+ try:
+ for target in targets_iter:
+ if args.start_at and not found:
+ found = target.name == args.start_at
+
+ if not found:
+ continue
+
+ if args.list_targets:
+ print(target.name)
+ continue
+
+ tries = 2 if args.retry_on_error else 1
+ verbosity = args.verbosity
+
+ cloud_environment = get_cloud_environment(args, target)
+
+ original_environment = current_environment if current_environment else EnvironmentDescription(args)
+ current_environment = None
+
+ display.info('>>> Environment Description\n%s' % original_environment, verbosity=3)
+
+ try:
+ while tries:
+ tries -= 1
+
+ try:
+ if cloud_environment:
+ cloud_environment.setup_once()
+
+ run_setup_targets(args, test_dir, target.setup_once, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, False)
+
+ start_time = time.time()
+
+ run_setup_targets(args, test_dir, target.setup_always, all_targets_dict, setup_targets_executed, inventory_path, common_temp_path, True)
+
+ if not args.explain:
+ # create a fresh test directory for each test target
+ remove_tree(test_dir)
+ make_dirs(test_dir)
+
+ if pre_target:
+ pre_target(target)
+
+ try:
+ if target.script_path:
+ command_integration_script(args, target, test_dir, inventory_path, common_temp_path,
+ remote_temp_path=remote_temp_path)
+ else:
+ command_integration_role(args, target, start_at_task, test_dir, inventory_path,
+ common_temp_path, remote_temp_path=remote_temp_path)
+ start_at_task = None
+ finally:
+ if post_target:
+ post_target(target)
+
+ end_time = time.time()
+
+ results[target.name] = dict(
+ name=target.name,
+ type=target.type,
+ aliases=target.aliases,
+ modules=target.modules,
+ run_time_seconds=int(end_time - start_time),
+ setup_once=target.setup_once,
+ setup_always=target.setup_always,
+ coverage=args.coverage,
+ coverage_label=args.coverage_label,
+ python_version=args.python_version,
+ )
+
+ break
+ except SubprocessError:
+ if cloud_environment:
+ cloud_environment.on_failure(target, tries)
+
+ if not original_environment.validate(target.name, throw=False):
+ raise
+
+ if not tries:
+ raise
+
+ display.warning('Retrying test target "%s" with maximum verbosity.' % target.name)
+ display.verbosity = args.verbosity = 6
+
+ start_time = time.time()
+ current_environment = EnvironmentDescription(args)
+ end_time = time.time()
+
+ EnvironmentDescription.check(original_environment, current_environment, target.name, throw=True)
+
+ results[target.name]['validation_seconds'] = int(end_time - start_time)
+
+ passed.append(target)
+ except Exception as ex:
+ failed.append(target)
+
+ if args.continue_on_error:
+ display.error(ex)
+ continue
+
+ display.notice('To resume at this test target, use the option: --start-at %s' % target.name)
+
+ next_target = next(targets_iter, None)
+
+ if next_target:
+ display.notice('To resume after this test target, use the option: --start-at %s' % next_target.name)
+
+ raise
+ finally:
+ display.verbosity = args.verbosity = verbosity
+
+ finally:
+ if not args.explain:
+ if args.coverage:
+ coverage_temp_path = os.path.join(common_temp_path, ResultType.COVERAGE.name)
+ coverage_save_path = ResultType.COVERAGE.path
+
+ for filename in os.listdir(coverage_temp_path):
+ shutil.copy(os.path.join(coverage_temp_path, filename), os.path.join(coverage_save_path, filename))
+
+ remove_tree(common_temp_path)
+
+ result_name = '%s-%s.json' % (
+ args.command, re.sub(r'[^0-9]', '-', str(datetime.datetime.utcnow().replace(microsecond=0))))
+
+ data = dict(
+ targets=results,
+ )
+
+ write_json_test_results(ResultType.DATA, result_name, data)
+
+ if failed:
+ raise ApplicationError('The %d integration test(s) listed below (out of %d) failed. See error output above for details:\n%s' % (
+ len(failed), len(passed) + len(failed), '\n'.join(target.name for target in failed)))
+
+
+def start_httptester(args):
+ """
+ :type args: EnvironmentConfig
+ :rtype: str, list[str]
+ """
+
+ # map ports from remote -> localhost -> container
+ # passing through localhost is only used when ansible-test is not already running inside a docker container
+ ports = [
+ dict(
+ remote=8080,
+ container=80,
+ ),
+ dict(
+ remote=8443,
+ container=443,
+ ),
+ ]
+
+ container_id = get_docker_container_id()
+
+ if not container_id:
+ for item in ports:
+ item['localhost'] = get_available_port()
+
+ docker_pull(args, args.httptester)
+
+ httptester_id = run_httptester(args, dict((port['localhost'], port['container']) for port in ports if 'localhost' in port))
+
+ if container_id:
+ container_host = get_docker_container_ip(args, httptester_id)
+ display.info('Found httptester container address: %s' % container_host, verbosity=1)
+ else:
+ container_host = get_docker_hostname()
+
+ ssh_options = []
+
+ for port in ports:
+ ssh_options += ['-R', '%d:%s:%d' % (port['remote'], container_host, port.get('localhost', port['container']))]
+
+ return httptester_id, ssh_options
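+
+
+# Illustrative only (not part of the original module): when running outside a
+# container, with hypothetical free local ports 33080 and 33443 and a local Docker
+# daemon (so get_docker_hostname() yields 'localhost'), the returned ssh options
+# would be:
+#
+#   ['-R', '8080:localhost:33080', '-R', '8443:localhost:33443']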
+
+
+def run_httptester(args, ports=None):
+ """
+ :type args: EnvironmentConfig
+ :type ports: dict[int, int] | None
+ :rtype: str
+ """
+ options = [
+ '--detach',
+ ]
+
+ if ports:
+ for localhost_port, container_port in ports.items():
+ options += ['-p', '%d:%d' % (localhost_port, container_port)]
+
+ network = get_docker_preferred_network_name(args)
+
+ if is_docker_user_defined_network(network):
+ # network-scoped aliases are only supported for containers in user defined networks
+ for alias in HTTPTESTER_HOSTS:
+ options.extend(['--network-alias', alias])
+
+ httptester_id = docker_run(args, args.httptester, options=options)[0]
+
+ if args.explain:
+ httptester_id = 'httptester_id'
+ else:
+ httptester_id = httptester_id.strip()
+
+ return httptester_id
+
+
+def inject_httptester(args):
+ """
+ :type args: CommonConfig
+ """
+ comment = ' # ansible-test httptester\n'
+ append_lines = ['127.0.0.1 %s%s' % (host, comment) for host in HTTPTESTER_HOSTS]
+ hosts_path = '/etc/hosts'
+
+ original_lines = read_text_file(hosts_path).splitlines(True)
+
+ if not any(line.endswith(comment) for line in original_lines):
+ write_text_file(hosts_path, ''.join(original_lines + append_lines))
+
+ # determine which forwarding mechanism to use
+ pfctl = find_executable('pfctl', required=False)
+ iptables = find_executable('iptables', required=False)
+
+ if pfctl:
+ kldload = find_executable('kldload', required=False)
+
+ if kldload:
+ try:
+ run_command(args, ['kldload', 'pf'], capture=True)
+ except SubprocessError:
+ pass # already loaded
+
+ rules = '''
+rdr pass inet proto tcp from any to any port 80 -> 127.0.0.1 port 8080
+rdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8443
+'''
+ cmd = ['pfctl', '-ef', '-']
+
+ try:
+ run_command(args, cmd, capture=True, data=rules)
+ except SubprocessError:
+ pass # non-zero exit status on success
+
+ elif iptables:
+ ports = [
+ (80, 8080),
+ (443, 8443),
+ ]
+
+ for src, dst in ports:
+ rule = ['-o', 'lo', '-p', 'tcp', '--dport', str(src), '-j', 'REDIRECT', '--to-port', str(dst)]
+
+ try:
+ # check for existing rule
+ cmd = ['iptables', '-t', 'nat', '-C', 'OUTPUT'] + rule
+ run_command(args, cmd, capture=True)
+ except SubprocessError:
+ # append rule when it does not exist
+ cmd = ['iptables', '-t', 'nat', '-A', 'OUTPUT'] + rule
+ run_command(args, cmd, capture=True)
+ else:
+ raise ApplicationError('No supported port forwarding mechanism detected.')
+
+
+def run_setup_targets(args, test_dir, target_names, targets_dict, targets_executed, inventory_path, temp_path, always):
+ """
+ :type args: IntegrationConfig
+ :type test_dir: str
+ :type target_names: list[str]
+ :type targets_dict: dict[str, IntegrationTarget]
+ :type targets_executed: set[str]
+ :type inventory_path: str
+ :type temp_path: str
+ :type always: bool
+ """
+ for target_name in target_names:
+ if not always and target_name in targets_executed:
+ continue
+
+ target = targets_dict[target_name]
+
+ if not args.explain:
+ # create a fresh test directory for each test target
+ remove_tree(test_dir)
+ make_dirs(test_dir)
+
+ if target.script_path:
+ command_integration_script(args, target, test_dir, inventory_path, temp_path)
+ else:
+ command_integration_role(args, target, None, test_dir, inventory_path, temp_path)
+
+ targets_executed.add(target_name)
+
+
+def integration_environment(args, target, test_dir, inventory_path, ansible_config, env_config):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type test_dir: str
+ :type inventory_path: str
+ :type ansible_config: str | None
+ :type env_config: CloudEnvironmentConfig | None
+ :rtype: dict[str, str]
+ """
+ env = ansible_environment(args, ansible_config=ansible_config)
+
+ if args.inject_httptester:
+ env.update(dict(
+ HTTPTESTER='1',
+ ))
+
+ callback_plugins = ['junit'] + (env_config.callback_plugins or [] if env_config else [])
+
+ integration = dict(
+ JUNIT_OUTPUT_DIR=ResultType.JUNIT.path,
+ ANSIBLE_CALLBACK_WHITELIST=','.join(sorted(set(callback_plugins))),
+ ANSIBLE_TEST_CI=args.metadata.ci_provider or get_ci_provider().code,
+ ANSIBLE_TEST_COVERAGE='check' if args.coverage_check else ('yes' if args.coverage else ''),
+ OUTPUT_DIR=test_dir,
+ INVENTORY_PATH=os.path.abspath(inventory_path),
+ )
+
+ if args.debug_strategy:
+ env.update(dict(ANSIBLE_STRATEGY='debug'))
+
+ if 'non_local/' in target.aliases:
+ if args.coverage:
+ display.warning('Skipping coverage reporting on Ansible modules for non-local test: %s' % target.name)
+
+ env.update(dict(ANSIBLE_TEST_REMOTE_INTERPRETER=''))
+
+ env.update(integration)
+
+ return env
+
+
+def command_integration_script(args, target, test_dir, inventory_path, temp_path, remote_temp_path=None):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type test_dir: str
+ :type inventory_path: str
+ :type temp_path: str
+ :type remote_temp_path: str | None
+ """
+ display.info('Running %s integration test script' % target.name)
+
+ env_config = None
+
+ if isinstance(args, PosixIntegrationConfig):
+ cloud_environment = get_cloud_environment(args, target)
+
+ if cloud_environment:
+ env_config = cloud_environment.get_environment_config()
+
+ with integration_test_environment(args, target, inventory_path) as test_env:
+ cmd = ['./%s' % os.path.basename(target.script_path)]
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)
+ cwd = os.path.join(test_env.targets_dir, target.relative_path)
+
+ env.update(dict(
+            # support use of ad hoc ansible commands in collections without specifying the fully qualified collection name
+ ANSIBLE_PLAYBOOK_DIR=cwd,
+ ))
+
+ if env_config and env_config.env_vars:
+ env.update(env_config.env_vars)
+
+ with integration_test_config_file(args, env_config, test_env.integration_dir) as config_path:
+ if config_path:
+ cmd += ['-e', '@%s' % config_path]
+
+ module_coverage = 'non_local/' not in target.aliases
+ intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
+ remote_temp_path=remote_temp_path, module_coverage=module_coverage)
+
+
+def command_integration_role(args, target, start_at_task, test_dir, inventory_path, temp_path, remote_temp_path=None):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type start_at_task: str | None
+ :type test_dir: str
+ :type inventory_path: str
+ :type temp_path: str
+ :type remote_temp_path: str | None
+ """
+ display.info('Running %s integration test role' % target.name)
+
+ env_config = None
+
+ vars_files = []
+ variables = dict(
+ output_dir=test_dir,
+ )
+
+ if isinstance(args, WindowsIntegrationConfig):
+ hosts = 'windows'
+ gather_facts = False
+ variables.update(dict(
+ win_output_dir=r'C:\ansible_testing',
+ ))
+ elif isinstance(args, NetworkIntegrationConfig):
+ hosts = target.network_platform
+ gather_facts = False
+ else:
+ hosts = 'testhost'
+ gather_facts = True
+
+ cloud_environment = get_cloud_environment(args, target)
+
+ if cloud_environment:
+ env_config = cloud_environment.get_environment_config()
+
+ with integration_test_environment(args, target, inventory_path) as test_env:
+ if os.path.exists(test_env.vars_file):
+ vars_files.append(os.path.relpath(test_env.vars_file, test_env.integration_dir))
+
+ play = dict(
+ hosts=hosts,
+ gather_facts=gather_facts,
+ vars_files=vars_files,
+ vars=variables,
+ roles=[
+ target.name,
+ ],
+ )
+
+ if env_config:
+ if env_config.ansible_vars:
+ variables.update(env_config.ansible_vars)
+
+ play.update(dict(
+ environment=env_config.env_vars,
+ module_defaults=env_config.module_defaults,
+ ))
+
+ playbook = json.dumps([play], indent=4, sort_keys=True)
+
+ with named_temporary_file(args=args, directory=test_env.integration_dir, prefix='%s-' % target.name, suffix='.yml', content=playbook) as playbook_path:
+ filename = os.path.basename(playbook_path)
+
+ display.info('>>> Playbook: %s\n%s' % (filename, playbook.strip()), verbosity=3)
+
+ cmd = ['ansible-playbook', filename, '-i', os.path.relpath(test_env.inventory_path, test_env.integration_dir)]
+
+ if start_at_task:
+ cmd += ['--start-at-task', start_at_task]
+
+ if args.tags:
+ cmd += ['--tags', args.tags]
+
+ if args.skip_tags:
+ cmd += ['--skip-tags', args.skip_tags]
+
+ if args.diff:
+ cmd += ['--diff']
+
+ if isinstance(args, NetworkIntegrationConfig):
+ if args.testcase:
+ cmd += ['-e', 'testcase=%s' % args.testcase]
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ env = integration_environment(args, target, test_dir, test_env.inventory_path, test_env.ansible_config, env_config)
+ cwd = test_env.integration_dir
+
+ env.update(dict(
+            # support use of ad hoc ansible commands in collections without specifying the fully qualified collection name
+ ANSIBLE_PLAYBOOK_DIR=cwd,
+ ))
+
+ env['ANSIBLE_ROLES_PATH'] = test_env.targets_dir
+
+ module_coverage = 'non_local/' not in target.aliases
+ intercept_command(args, cmd, target_name=target.name, env=env, cwd=cwd, temp_path=temp_path,
+ remote_temp_path=remote_temp_path, module_coverage=module_coverage)
+
+
+def get_changes_filter(args):
+ """
+ :type args: TestConfig
+ :rtype: list[str]
+ """
+ paths = detect_changes(args)
+
+ if not args.metadata.change_description:
+ if paths:
+ changes = categorize_changes(args, paths, args.command)
+ else:
+ changes = ChangeDescription()
+
+ args.metadata.change_description = changes
+
+ if paths is None:
+ return [] # change detection not enabled, do not filter targets
+
+ if not paths:
+ raise NoChangesDetected()
+
+ if args.metadata.change_description.targets is None:
+ raise NoTestsForChanges()
+
+ return args.metadata.change_description.targets
+
+
+def detect_changes(args):
+ """
+ :type args: TestConfig
+ :rtype: list[str] | None
+ """
+ if args.changed:
+ paths = get_ci_provider().detect_changes(args)
+ elif args.changed_from or args.changed_path:
+ paths = args.changed_path or []
+ if args.changed_from:
+ paths += read_text_file(args.changed_from).splitlines()
+ else:
+ return None # change detection not enabled
+
+ if paths is None:
+ return None # act as though change detection not enabled, do not filter targets
+
+ display.info('Detected changes in %d file(s).' % len(paths))
+
+ for path in paths:
+ display.info(path, verbosity=1)
+
+ return paths
+
+
+def get_integration_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ if args.docker:
+ return get_integration_docker_filter(args, targets)
+
+ if args.remote:
+ return get_integration_remote_filter(args, targets)
+
+ return get_integration_local_filter(args, targets)
+
+
+def common_integration_filter(args, targets, exclude):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :type exclude: list[str]
+ """
+ override_disabled = set(target for target in args.include if target.startswith('disabled/'))
+
+ if not args.allow_disabled:
+ skip = 'disabled/'
+ override = [target.name for target in targets if override_disabled & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-disabled or prefixing with "disabled/": %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ override_unsupported = set(target for target in args.include if target.startswith('unsupported/'))
+
+ if not args.allow_unsupported:
+ skip = 'unsupported/'
+ override = [target.name for target in targets if override_unsupported & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-unsupported or prefixing with "unsupported/": %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ override_unstable = set(target for target in args.include if target.startswith('unstable/'))
+
+ if args.allow_unstable_changed:
+ override_unstable |= set(args.metadata.change_description.focused_targets or [])
+
+ if not args.allow_unstable:
+ skip = 'unstable/'
+ override = [target.name for target in targets if override_unstable & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-unstable or prefixing with "unstable/": %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ # only skip a Windows test if using --windows and all the --windows versions are defined in the aliases as skip/windows/%s
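+    # for example (illustrative), a target with aliases "skip/windows/2016/" and "skip/windows/2019/" is excluded
+    # when running with --windows 2016 --windows 2019, but included with a warning for --windows 2016 --windows 2022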
+ if isinstance(args, WindowsIntegrationConfig) and args.windows:
+ all_skipped = []
+ not_skipped = []
+
+ for target in targets:
+ if "skip/windows/" not in target.aliases:
+ continue
+
+ skip_valid = []
+ skip_missing = []
+ for version in args.windows:
+ if "skip/windows/%s/" % version in target.aliases:
+ skip_valid.append(version)
+ else:
+ skip_missing.append(version)
+
+ if skip_missing and skip_valid:
+ not_skipped.append((target.name, skip_valid, skip_missing))
+ elif skip_valid:
+ all_skipped.append(target.name)
+
+ if all_skipped:
+ exclude.extend(all_skipped)
+ skip_aliases = ["skip/windows/%s/" % w for w in args.windows]
+ display.warning('Excluding tests marked "%s" which are set to skip with --windows %s: %s'
+ % ('", "'.join(skip_aliases), ', '.join(args.windows), ', '.join(all_skipped)))
+
+ if not_skipped:
+ for target, skip_valid, skip_missing in not_skipped:
+ # warn when failing to skip due to lack of support for skipping only some versions
+ display.warning('Including test "%s" which was marked to skip for --windows %s but not %s.'
+ % (target, ', '.join(skip_valid), ', '.join(skip_missing)))
+
+
+def get_integration_local_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ exclude = []
+
+ common_integration_filter(args, targets, exclude)
+
+ if not args.allow_root and os.getuid() != 0:
+ skip = 'needs/root/'
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require --allow-root or running as root: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ override_destructive = set(target for target in args.include if target.startswith('destructive/'))
+
+ if not args.allow_destructive:
+ skip = 'destructive/'
+ override = [target.name for target in targets if override_destructive & set(target.aliases)]
+ skipped = [target.name for target in targets if skip in target.aliases and target.name not in override]
+ if skipped:
+ exclude.extend(skipped)
+ display.warning('Excluding tests marked "%s" which require --allow-destructive or prefixing with "destructive/" to run locally: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ exclude_targets_by_python_version(targets, args.python_version, exclude)
+
+ return exclude
+
+
+def get_integration_docker_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ exclude = []
+
+ common_integration_filter(args, targets, exclude)
+
+ skip = 'skip/docker/'
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which cannot run under docker: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ if not args.docker_privileged:
+ skip = 'needs/privileged/'
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which require --docker-privileged to run under docker: %s'
+ % (skip.rstrip('/'), ', '.join(skipped)))
+
+ python_version = get_python_version(args, get_docker_completion(), args.docker_raw)
+
+ exclude_targets_by_python_version(targets, python_version, exclude)
+
+ return exclude
+
+
+def get_integration_remote_filter(args, targets):
+ """
+ :type args: IntegrationConfig
+ :type targets: tuple[IntegrationTarget]
+ :rtype: list[str]
+ """
+ remote = args.parsed_remote
+
+ exclude = []
+
+ common_integration_filter(args, targets, exclude)
+
+ skips = {
+ 'skip/%s' % remote.platform: remote.platform,
+ 'skip/%s/%s' % (remote.platform, remote.version): '%s %s' % (remote.platform, remote.version),
+ 'skip/%s%s' % (remote.platform, remote.version): '%s %s' % (remote.platform, remote.version), # legacy syntax, use above format
+ }
+
+ if remote.arch:
+ skips.update({
+ 'skip/%s/%s' % (remote.arch, remote.platform): '%s on %s' % (remote.platform, remote.arch),
+ 'skip/%s/%s/%s' % (remote.arch, remote.platform, remote.version): '%s %s on %s' % (remote.platform, remote.version, remote.arch),
+ })
+
+ for skip, description in skips.items():
+ skipped = [target.name for target in targets if skip in target.skips]
+ if skipped:
+ exclude.append(skip + '/')
+ display.warning('Excluding tests marked "%s" which are not supported on %s: %s' % (skip, description, ', '.join(skipped)))
+
+ python_version = get_python_version(args, get_remote_completion(), args.remote)
+
+ exclude_targets_by_python_version(targets, python_version, exclude)
+
+ return exclude
+
+
+def exclude_targets_by_python_version(targets, python_version, exclude):
+ """
+ :type targets: tuple[IntegrationTarget]
+ :type python_version: str
+ :type exclude: list[str]
+ """
+ if not python_version:
+ display.warning('Python version unknown. Unable to skip tests based on Python version.')
+ return
+
+ python_major_version = python_version.split('.')[0]
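+    # for example (illustrative), python_version "3.6" results in checks for both "skip/python3.6/" and "skip/python3/"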
+
+ skip = 'skip/python%s/' % python_version
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which are not supported on python %s: %s'
+ % (skip.rstrip('/'), python_version, ', '.join(skipped)))
+
+ skip = 'skip/python%s/' % python_major_version
+ skipped = [target.name for target in targets if skip in target.aliases]
+ if skipped:
+ exclude.append(skip)
+ display.warning('Excluding tests marked "%s" which are not supported on python %s: %s'
+ % (skip.rstrip('/'), python_version, ', '.join(skipped)))
+
+
+def get_python_version(args, configs, name):
+ """
+ :type args: EnvironmentConfig
+ :type configs: dict[str, dict[str, str]]
+ :type name: str
+ """
+ config = configs.get(name, {})
+ config_python = config.get('python')
+
+ if not config or not config_python:
+ if args.python:
+ return args.python
+
+ display.warning('No Python version specified. '
+ 'Use completion config or the --python option to specify one.', unique=True)
+
+ return '' # failure to provide a version may result in failures or reduced functionality later
+
+ supported_python_versions = config_python.split(',')
+ default_python_version = supported_python_versions[0]
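+    # for example (illustrative), a completion entry of "python=3.6,2.7" yields 3.6 as the default when --python is not given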
+
+ if args.python and args.python not in supported_python_versions:
+ raise ApplicationError('Python %s is not supported by %s. Supported Python version(s) are: %s' % (
+ args.python, name, ', '.join(sorted(supported_python_versions))))
+
+ python_version = args.python or default_python_version
+
+ return python_version
+
+
+def get_python_interpreter(args, configs, name):
+ """
+ :type args: EnvironmentConfig
+ :type configs: dict[str, dict[str, str]]
+ :type name: str
+ """
+ if args.python_interpreter:
+ return args.python_interpreter
+
+ config = configs.get(name, {})
+
+ if not config:
+ if args.python:
+ guess = 'python%s' % args.python
+ else:
+ guess = 'python'
+
+ display.warning('Using "%s" as the Python interpreter. '
+ 'Use completion config or the --python-interpreter option to specify the path.' % guess, unique=True)
+
+ return guess
+
+ python_version = get_python_version(args, configs, name)
+
+ python_dir = config.get('python_dir', '/usr/bin')
+ python_interpreter = os.path.join(python_dir, 'python%s' % python_version)
+ python_interpreter = config.get('python%s' % python_version, python_interpreter)
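+    # for example (illustrative), python_dir "/usr/local/bin" with version "3.6" yields "/usr/local/bin/python3.6",
+    # unless the completion config supplies an explicit "python3.6" interpreter path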
+
+ return python_interpreter
+
+
+class EnvironmentDescription:
+ """Description of current running environment."""
+ def __init__(self, args):
+ """Initialize snapshot of environment configuration.
+ :type args: IntegrationConfig
+ """
+ self.args = args
+
+ if self.args.explain:
+ self.data = {}
+ return
+
+ warnings = []
+
+ versions = ['']
+ versions += SUPPORTED_PYTHON_VERSIONS
+ versions += list(set(v.split('.')[0] for v in SUPPORTED_PYTHON_VERSIONS))
+
+ version_check = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'versions.py')
+ python_paths = dict((v, find_executable('python%s' % v, required=False)) for v in sorted(versions))
+ pip_paths = dict((v, find_executable('pip%s' % v, required=False)) for v in sorted(versions))
+ program_versions = dict((v, self.get_version([python_paths[v], version_check], warnings)) for v in sorted(python_paths) if python_paths[v])
+ pip_interpreters = dict((v, self.get_shebang(pip_paths[v])) for v in sorted(pip_paths) if pip_paths[v])
+ known_hosts_hash = self.get_hash(os.path.expanduser('~/.ssh/known_hosts'))
+
+ for version in sorted(versions):
+ self.check_python_pip_association(version, python_paths, pip_paths, pip_interpreters, warnings)
+
+ for warning in warnings:
+ display.warning(warning, unique=True)
+
+ self.data = dict(
+ python_paths=python_paths,
+ pip_paths=pip_paths,
+ program_versions=program_versions,
+ pip_interpreters=pip_interpreters,
+ known_hosts_hash=known_hosts_hash,
+ warnings=warnings,
+ )
+
+ @staticmethod
+ def check_python_pip_association(version, python_paths, pip_paths, pip_interpreters, warnings):
+ """
+ :type version: str
+        :type python_paths: dict[str, str]
+        :type pip_paths: dict[str, str]
+        :type pip_interpreters: dict[str, str]
+        :type warnings: list[str]
+ """
+ python_label = 'Python%s' % (' %s' % version if version else '')
+
+ pip_path = pip_paths.get(version)
+ python_path = python_paths.get(version)
+
+ if not python_path and not pip_path:
+        # neither python nor pip is present for this version
+ return
+
+ if not python_path:
+ warnings.append('A %s interpreter was not found, yet a matching pip was found at "%s".' % (python_label, pip_path))
+ return
+
+ if not pip_path:
+ warnings.append('A %s interpreter was found at "%s", yet a matching pip was not found.' % (python_label, python_path))
+ return
+
+ pip_shebang = pip_interpreters.get(version)
+
+ match = re.search(r'#!\s*(?P<command>[^\s]+)', pip_shebang)
+
+ if not match:
+ warnings.append('A %s pip was found at "%s", but it does not have a valid shebang: %s' % (python_label, pip_path, pip_shebang))
+ return
+
+ pip_interpreter = os.path.realpath(match.group('command'))
+ python_interpreter = os.path.realpath(python_path)
+
+ if pip_interpreter == python_interpreter:
+ return
+
+ try:
+ identical = filecmp.cmp(pip_interpreter, python_interpreter)
+ except OSError:
+ identical = False
+
+ if identical:
+ return
+
+ warnings.append('A %s pip was found at "%s", but it uses interpreter "%s" instead of "%s".' % (
+ python_label, pip_path, pip_interpreter, python_interpreter))
+
+ def __str__(self):
+ """
+ :rtype: str
+ """
+ return json.dumps(self.data, sort_keys=True, indent=4)
+
+ def validate(self, target_name, throw):
+ """
+ :type target_name: str
+ :type throw: bool
+ :rtype: bool
+ """
+ current = EnvironmentDescription(self.args)
+
+ return self.check(self, current, target_name, throw)
+
+ @staticmethod
+ def check(original, current, target_name, throw):
+ """
+ :type original: EnvironmentDescription
+ :type current: EnvironmentDescription
+ :type target_name: str
+ :type throw: bool
+ :rtype: bool
+ """
+ original_json = str(original)
+ current_json = str(current)
+
+ if original_json == current_json:
+ return True
+
+ unified_diff = '\n'.join(difflib.unified_diff(
+ a=original_json.splitlines(),
+ b=current_json.splitlines(),
+ fromfile='original.json',
+ tofile='current.json',
+ lineterm='',
+ ))
+
+ message = ('Test target "%s" has changed the test environment!\n'
+ 'If these changes are necessary, they must be reverted before the test finishes.\n'
+ '>>> Original Environment\n'
+ '%s\n'
+ '>>> Current Environment\n'
+ '%s\n'
+ '>>> Environment Diff\n'
+ '%s'
+ % (target_name, original_json, current_json, unified_diff))
+
+ if throw:
+ raise ApplicationError(message)
+
+ display.error(message)
+
+ return False
+
+ @staticmethod
+ def get_version(command, warnings):
+ """
+ :type command: list[str]
+        :type warnings: list[str]
+        :rtype: list[str] | None
+ """
+ try:
+ stdout, stderr = raw_command(command, capture=True, cmd_verbosity=2)
+ except SubprocessError as ex:
+ warnings.append(u'%s' % ex)
+ return None # all failures are equal, we don't care why it failed, only that it did
+
+ return [line.strip() for line in ((stdout or '').strip() + (stderr or '').strip()).splitlines()]
+
+ @staticmethod
+ def get_shebang(path):
+ """
+ :type path: str
+ :rtype: str
+ """
+ with open_text_file(path) as script_fd:
+ return script_fd.readline().strip()
+
+ @staticmethod
+ def get_hash(path):
+ """
+ :type path: str
+ :rtype: str | None
+ """
+ if not os.path.exists(path):
+ return None
+
+ file_hash = hashlib.md5()
+
+ file_hash.update(read_binary_file(path))
+
+ return file_hash.hexdigest()
+
+
+class NoChangesDetected(ApplicationWarning):
+ """Exception when change detection was performed, but no changes were found."""
+ def __init__(self):
+ super(NoChangesDetected, self).__init__('No changes detected.')
+
+
+class NoTestsForChanges(ApplicationWarning):
+ """Exception when changes detected, but no tests trigger as a result."""
+ def __init__(self):
+ super(NoTestsForChanges, self).__init__('No tests found for detected changes.')
+
+
+class Delegate(Exception):
+ """Trigger command delegation."""
+ def __init__(self, exclude=None, require=None, integration_targets=None):
+ """
+ :type exclude: list[str] | None
+ :type require: list[str] | None
+ :type integration_targets: tuple[IntegrationTarget] | None
+ """
+ super(Delegate, self).__init__()
+
+ self.exclude = exclude or []
+ self.require = require or []
+ self.integration_targets = integration_targets or tuple()
+
+
+class AllTargetsSkipped(ApplicationWarning):
+ """All targets skipped."""
+ def __init__(self):
+ super(AllTargetsSkipped, self).__init__('All targets skipped.')
diff --git a/test/lib/ansible_test/_internal/git.py b/test/lib/ansible_test/_internal/git.py
new file mode 100644
index 00000000..acc39f3f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/git.py
@@ -0,0 +1,137 @@
+"""Wrapper around git command-line tools."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from . import types as t
+
+from .util import (
+ SubprocessError,
+ raw_command,
+)
+
+
+class Git:
+ """Wrapper around git command-line tools."""
+ def __init__(self, root=None): # type: (t.Optional[str]) -> None
+ self.git = 'git'
+ self.root = root
+
+ def get_diff(self, args, git_options=None):
+ """
+ :type args: list[str]
+ :type git_options: list[str] | None
+ :rtype: list[str]
+ """
+ cmd = ['diff'] + args
+ if git_options is None:
+ git_options = ['-c', 'core.quotePath=']
+ return self.run_git_split(git_options + cmd, '\n', str_errors='replace')
+
+ def get_diff_names(self, args):
+ """
+ :type args: list[str]
+ :rtype: list[str]
+ """
+ cmd = ['diff', '--name-only', '--no-renames', '-z'] + args
+ return self.run_git_split(cmd, '\0')
+
+ def get_submodule_paths(self): # type: () -> t.List[str]
+ """Return a list of submodule paths recursively."""
+ cmd = ['submodule', 'status', '--recursive']
+ output = self.run_git_split(cmd, '\n')
+ submodule_paths = [re.search(r'^.[0-9a-f]+ (?P<path>[^ ]+)', line).group('path') for line in output]
+
+ # status is returned for all submodules in the current git repository relative to the current directory
+ # when the current directory is not the root of the git repository this can yield relative paths which are not below the current directory
+ # this can occur when multiple collections are in a git repo and some collections are submodules when others are not
+ # specifying "." as the path to enumerate would limit results to the current directory, but can cause the git command to fail with the error:
+ # error: pathspec '.' did not match any file(s) known to git
+ # this can occur when the current directory contains no files tracked by git
+ # instead we'll filter out the relative paths, since we're only interested in those at or below the current directory
+ submodule_paths = [path for path in submodule_paths if not path.startswith('../')]
+
+ return submodule_paths
+
+ def get_file_names(self, args):
+ """
+ :type args: list[str]
+ :rtype: list[str]
+ """
+ cmd = ['ls-files', '-z'] + args
+ return self.run_git_split(cmd, '\0')
+
+ def get_branches(self):
+ """
+ :rtype: list[str]
+ """
+ cmd = ['for-each-ref', 'refs/heads/', '--format', '%(refname:strip=2)']
+ return self.run_git_split(cmd)
+
+ def get_branch(self):
+ """
+ :rtype: str
+ """
+ cmd = ['symbolic-ref', '--short', 'HEAD']
+ return self.run_git(cmd).strip()
+
+ def get_rev_list(self, commits=None, max_count=None):
+ """
+ :type commits: list[str] | None
+ :type max_count: int | None
+ :rtype: list[str]
+ """
+ cmd = ['rev-list']
+
+ if commits:
+ cmd += commits
+ else:
+ cmd += ['HEAD']
+
+ if max_count:
+ cmd += ['--max-count', '%s' % max_count]
+
+ return self.run_git_split(cmd)
+
+ def get_branch_fork_point(self, branch):
+ """
+ :type branch: str
+ :rtype: str
+ """
+ cmd = ['merge-base', '--fork-point', branch]
+ return self.run_git(cmd).strip()
+
+ def is_valid_ref(self, ref):
+ """
+ :type ref: str
+ :rtype: bool
+ """
+ cmd = ['show', ref]
+ try:
+ self.run_git(cmd, str_errors='replace')
+ return True
+ except SubprocessError:
+ return False
+
+ def run_git_split(self, cmd, separator=None, str_errors='strict'):
+ """
+ :type cmd: list[str]
+ :type separator: str | None
+ :type str_errors: str
+ :rtype: list[str]
+ """
+ output = self.run_git(cmd, str_errors=str_errors).strip(separator)
+
+ if not output:
+ return []
+
+ return output.split(separator)
+
+ def run_git(self, cmd, str_errors='strict'):
+ """
+ :type cmd: list[str]
+ :type str_errors: str
+ :rtype: str
+ """
+ return raw_command([self.git] + cmd, cwd=self.root, capture=True, str_errors=str_errors)[0]
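+
+
+# Example usage (illustrative):
+#   git = Git()
+#   branch = git.get_branch()
+#   changed = git.get_diff_names(['HEAD~1'])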
diff --git a/test/lib/ansible_test/_internal/http.py b/test/lib/ansible_test/_internal/http.py
new file mode 100644
index 00000000..6607a10b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/http.py
@@ -0,0 +1,181 @@
+"""
+Primitive replacement for requests to avoid extra dependency.
+Avoids use of urllib2 due to lack of SNI support.
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import time
+
+try:
+ from urllib import urlencode
+except ImportError:
+ # noinspection PyCompatibility, PyUnresolvedReferences
+ from urllib.parse import urlencode # pylint: disable=locally-disabled, import-error, no-name-in-module
+
+try:
+ # noinspection PyCompatibility
+ from urlparse import urlparse, urlunparse, parse_qs
+except ImportError:
+ # noinspection PyCompatibility, PyUnresolvedReferences
+ from urllib.parse import urlparse, urlunparse, parse_qs # pylint: disable=locally-disabled, ungrouped-imports
+
+from .util import (
+ ApplicationError,
+ SubprocessError,
+ display,
+)
+
+from .util_common import (
+ CommonConfig,
+ run_command,
+)
+
+
+class HttpClient:
+ """Make HTTP requests via curl."""
+ def __init__(self, args, always=False, insecure=False, proxy=None):
+ """
+ :type args: CommonConfig
+ :type always: bool
+ :type insecure: bool
+ """
+ self.args = args
+ self.always = always
+ self.insecure = insecure
+ self.proxy = proxy
+
+ self.username = None
+ self.password = None
+
+ def get(self, url):
+ """
+ :type url: str
+ :rtype: HttpResponse
+ """
+ return self.request('GET', url)
+
+ def delete(self, url):
+ """
+ :type url: str
+ :rtype: HttpResponse
+ """
+ return self.request('DELETE', url)
+
+ def put(self, url, data=None, headers=None):
+ """
+ :type url: str
+ :type data: str | None
+ :type headers: dict[str, str] | None
+ :rtype: HttpResponse
+ """
+ return self.request('PUT', url, data, headers)
+
+ def request(self, method, url, data=None, headers=None):
+ """
+ :type method: str
+ :type url: str
+ :type data: str | None
+ :type headers: dict[str, str] | None
+ :rtype: HttpResponse
+ """
+ cmd = ['curl', '-s', '-S', '-i', '-X', method]
+
+ if self.insecure:
+ cmd += ['--insecure']
+
+ if headers is None:
+ headers = {}
+
+        headers['Expect'] = '' # don't send the Expect: 100-continue header
+
+ if self.username:
+ if self.password:
+ display.sensitive.add(self.password)
+ cmd += ['-u', '%s:%s' % (self.username, self.password)]
+ else:
+ cmd += ['-u', self.username]
+
+ for header in headers.keys():
+ cmd += ['-H', '%s: %s' % (header, headers[header])]
+
+ if data is not None:
+ cmd += ['-d', data]
+
+ if self.proxy:
+ cmd += ['-x', self.proxy]
+
+ cmd += [url]
+
+ attempts = 0
+ max_attempts = 3
+ sleep_seconds = 3
+
+ # curl error codes which are safe to retry (request never sent to server)
+ retry_on_status = (
+ 6, # CURLE_COULDNT_RESOLVE_HOST
+ )
+
+ stdout = ''
+
+ while True:
+ attempts += 1
+
+ try:
+ stdout = run_command(self.args, cmd, capture=True, always=self.always, cmd_verbosity=2)[0]
+ break
+ except SubprocessError as ex:
+ if ex.status in retry_on_status and attempts < max_attempts:
+ display.warning(u'%s' % ex)
+ time.sleep(sleep_seconds)
+ continue
+
+ raise
+
+ if self.args.explain and not self.always:
+ return HttpResponse(method, url, 200, '')
+
+ header, body = stdout.split('\r\n\r\n', 1)
+
+ response_headers = header.split('\r\n')
+ first_line = response_headers[0]
+ http_response = first_line.split(' ')
+ status_code = int(http_response[1])
+
+ return HttpResponse(method, url, status_code, body)
+
+
+class HttpResponse:
+ """HTTP response from curl."""
+ def __init__(self, method, url, status_code, response):
+ """
+ :type method: str
+ :type url: str
+ :type status_code: int
+ :type response: str
+ """
+ self.method = method
+ self.url = url
+ self.status_code = status_code
+ self.response = response
+
+ def json(self):
+ """
+ :rtype: any
+ """
+ try:
+ return json.loads(self.response)
+ except ValueError:
+ raise HttpError(self.status_code, 'Cannot parse response to %s %s as JSON:\n%s' % (self.method, self.url, self.response))
+
+
+class HttpError(ApplicationError):
+ """HTTP response as an error."""
+ def __init__(self, status, message):
+ """
+ :type status: int
+ :type message: str
+ """
+ super(HttpError, self).__init__('%s: %s' % (status, message))
+ self.status = status
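+
+
+# Example usage (illustrative):
+#   client = HttpClient(args, insecure=True)
+#   response = client.get('https://example.com/api')
+#   data = response.json()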
diff --git a/test/lib/ansible_test/_internal/import_analysis.py b/test/lib/ansible_test/_internal/import_analysis.py
new file mode 100644
index 00000000..9cc5376f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/import_analysis.py
@@ -0,0 +1,362 @@
+"""Analyze python import statements."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import os
+import re
+
+from . import types as t
+
+from .io import (
+ read_binary_file,
+)
+
+from .util import (
+ display,
+ ApplicationError,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+)
+
+VIRTUAL_PACKAGES = set([
+ 'ansible.module_utils.six',
+])
+
+
+def get_python_module_utils_imports(compile_targets):
+ """Return a dictionary of module_utils names mapped to sets of python file paths.
+ :type compile_targets: list[TestTarget]
+ :rtype: dict[str, set[str]]
+ """
+
+ module_utils = enumerate_module_utils()
+
+ virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
+ module_utils -= virtual_utils
+
+ imports_by_target_path = {}
+
+ for target in compile_targets:
+ imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
+
+ def recurse_import(import_name, depth=0, seen=None): # type: (str, int, t.Optional[t.Set[str]]) -> t.Set[str]
+ """Recursively expand module_utils imports from module_utils files."""
+ display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
+
+ if seen is None:
+ seen = set([import_name])
+
+ results = set([import_name])
+
+ # virtual packages depend on the modules they contain instead of the reverse
+ if import_name in VIRTUAL_PACKAGES:
+ for sub_import in sorted(virtual_utils):
+ if sub_import.startswith('%s.' % import_name):
+ if sub_import in seen:
+ continue
+
+ seen.add(sub_import)
+
+ matches = sorted(recurse_import(sub_import, depth + 1, seen))
+
+ for result in matches:
+ results.add(result)
+
+ import_path = get_import_path(import_name)
+
+ if import_path not in imports_by_target_path:
+ import_path = get_import_path(import_name, package=True)
+
+ if import_path not in imports_by_target_path:
+ raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
+
+ # process imports in reverse so the deepest imports come first
+ for name in sorted(imports_by_target_path[import_path], reverse=True):
+ if name in virtual_utils:
+ continue
+
+ if name in seen:
+ continue
+
+ seen.add(name)
+
+ matches = sorted(recurse_import(name, depth + 1, seen))
+
+ for result in matches:
+ results.add(result)
+
+ return results
+
+ for module_util in module_utils:
+ # recurse over module_utils imports while excluding self
+ module_util_imports = recurse_import(module_util)
+ module_util_imports.remove(module_util)
+
+ # add recursive imports to all path entries which import this module_util
+ for target_path in imports_by_target_path:
+ if module_util in imports_by_target_path[target_path]:
+ for module_util_import in sorted(module_util_imports):
+ if module_util_import not in imports_by_target_path[target_path]:
+ display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
+ imports_by_target_path[target_path].add(module_util_import)
+
+ imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])
+
+ for target_path in imports_by_target_path:
+ for module_util in imports_by_target_path[target_path]:
+ imports[module_util].add(target_path)
+
+ # for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
+ for virtual_util in virtual_utils:
+ parent_package = '.'.join(virtual_util.split('.')[:-1])
+ imports[virtual_util] = imports[parent_package]
+ display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ package_path = get_import_path(module_util, package=True)
+
+ if os.path.exists(package_path) and not os.path.getsize(package_path):
+ continue # ignore empty __init__.py files
+
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_python_module_utils_name(path): # type: (str) -> str
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils'
+ else:
+ prefix = 'ansible.module_utils'
+
+ if path.endswith('/__init__.py'):
+ path = os.path.dirname(path)
+
+ if path == base_path:
+ name = prefix
+ else:
+ name = prefix + '.' + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils():
+ """Return a list of available module_utils imports.
+ :rtype: set[str]
+ """
+ module_utils = []
+
+ for path in data_context().content.walk_files(data_context().content.module_utils_path):
+ ext = os.path.splitext(path)[1]
+
+ if ext != '.py':
+ continue
+
+ module_utils.append(get_python_module_utils_name(path))
+
+ return set(module_utils)
+
+
+def extract_python_module_utils_imports(path, module_utils):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ :rtype: set[str]
+ """
+ # Python code must be read as bytes to avoid a SyntaxError when the source uses comments to declare the file encoding.
+ # See: https://www.python.org/dev/peps/pep-0263
+ # Specifically: If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised.
+ code = read_binary_file(path)
+
+ try:
+ tree = ast.parse(code)
+ except SyntaxError as ex:
+        # Treat this error as a warning so tests can be executed as fully as possible.
+ # The compile test will detect and report this syntax error.
+ display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
+ return set()
+
+ finder = ModuleUtilFinder(path, module_utils)
+ finder.visit(tree)
+ return finder.imports
+
+
+def get_import_path(name, package=False): # type: (str, bool) -> str
+ """Return a path from an import name."""
+ if package:
+ filename = os.path.join(name.replace('.', '/'), '__init__.py')
+ else:
+ filename = '%s.py' % name.replace('.', '/')
+
+ if name.startswith('ansible.module_utils.') or name == 'ansible.module_utils':
+ path = os.path.join('lib', filename)
+ elif data_context().content.collection and (
+ name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name) or
+ name == 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name):
+ path = '/'.join(filename.split('/')[3:])
+ else:
+ raise Exception('Unexpected import name: %s' % name)
+
+ return path
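+
+# Example (illustrative): get_import_path('ansible.module_utils.basic') returns 'lib/ansible/module_utils/basic.py',
+# and get_import_path('ansible.module_utils', package=True) returns 'lib/ansible/module_utils/__init__.py'.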
+
+
+def path_to_module(path): # type: (str) -> str
+ """Convert the given path to a module name."""
+ module = os.path.splitext(path)[0].replace(os.path.sep, '.')
+
+ if module.endswith('.__init__'):
+ module = module[:-9]
+
+ return module
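+
+# Example (illustrative): path_to_module('ansible/modules/ping.py') returns 'ansible.modules.ping',
+# and path_to_module('ansible/module_utils/__init__.py') returns 'ansible.module_utils'.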
+
+
+def relative_to_absolute(name, level, module, path, lineno): # type: (str, int, str, str, int) -> str
+ """Convert a relative import to an absolute import."""
+ if level <= 0:
+ absolute_name = name
+ elif not module:
+ display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno))
+ absolute_name = 'relative.nomodule'
+ else:
+ parts = module.split('.')
+
+ if level >= len(parts):
+ display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno))
+ absolute_name = 'relative.abovelevel'
+ else:
+ absolute_name = '.'.join(parts[:-level] + [name])
+
+ return absolute_name
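+
+# Example (illustrative): relative_to_absolute('common', 1, 'ansible.module_utils.facts', path, lineno)
+# returns 'ansible.module_utils.common'.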
+
+
+class ModuleUtilFinder(ast.NodeVisitor):
+ """AST visitor to find valid module_utils imports."""
+ def __init__(self, path, module_utils):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ """
+ self.path = path
+ self.module_utils = module_utils
+ self.imports = set()
+
+ # implicitly import parent package
+
+ if path.endswith('/__init__.py'):
+ path = os.path.split(path)[0]
+
+ if path.startswith('lib/ansible/module_utils/'):
+ package = os.path.split(path)[0].replace('/', '.')[4:]
+
+ if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
+ self.add_import(package, 0)
+
+ self.module = None
+
+ if data_context().content.is_ansible:
+            # Various parts of the Ansible source tree execute within different modules.
+ # To support import analysis, each file which uses relative imports must reside under a path defined here.
+ # The mapping is a tuple consisting of a path pattern to match and a replacement path.
+            # During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
+ path_map = (
+ ('^hacking/build_library/build_ansible/', 'build_ansible/'),
+ ('^lib/ansible/', 'ansible/'),
+ ('^test/lib/ansible_test/_data/sanity/validate-modules/', 'validate_modules/'),
+ ('^test/units/', 'test/units/'),
+ ('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
+ ('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
+ ('^test/integration/targets/.*/library/', 'ansible/modules/'),
+ )
+
+ for pattern, replacement in path_map:
+ if re.search(pattern, self.path):
+ revised_path = re.sub(pattern, replacement, self.path)
+ self.module = path_to_module(revised_path)
+ break
+ else:
+ # This assumes that all files within the collection are executed by Ansible as part of the collection.
+ # While that will usually be true, there are exceptions which will result in this resolution being incorrect.
+ self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
+
+ # noinspection PyPep8Naming
+ # pylint: disable=locally-disabled, invalid-name
+ def visit_Import(self, node):
+ """
+ :type node: ast.Import
+ """
+ self.generic_visit(node)
+
+ # import ansible.module_utils.MODULE[.MODULE]
+        # import ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE]
+ self.add_imports([alias.name for alias in node.names], node.lineno)
+
+ # noinspection PyPep8Naming
+ # pylint: disable=locally-disabled, invalid-name
+ def visit_ImportFrom(self, node):
+ """
+ :type node: ast.ImportFrom
+ """
+ self.generic_visit(node)
+
+ if not node.module:
+ return
+
+ module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
+
+ if not module.startswith('ansible'):
+ return
+
+ # from ansible.module_utils import MODULE[, MODULE]
+ # from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
+ # from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
+ # from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
+ self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
+
+ def add_import(self, name, line_number):
+ """
+ :type name: str
+ :type line_number: int
+ """
+ import_name = name
+
+ while self.is_module_util_name(name):
+ if name in self.module_utils:
+ if name not in self.imports:
+ display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
+ self.imports.add(name)
+
+ return # duplicate imports are ignored
+
+ name = '.'.join(name.split('.')[:-1])
+
+ if is_subdir(self.path, data_context().content.test_path):
+ return # invalid imports in tests are ignored
+
+        # Treat this error as a warning so tests can be executed as fully as possible.
+ # This error should be detected by unit or integration tests.
+ display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
+
+ def add_imports(self, names, line_no): # type: (t.List[str], int) -> None
+ """Add the given import names if they are module_utils imports."""
+ for name in names:
+ if self.is_module_util_name(name):
+ self.add_import(name, line_no)
+
+ @staticmethod
+ def is_module_util_name(name): # type: (str) -> bool
+ """Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
+ if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
+ return True
+
+ if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
+ return True
+
+ return False
diff --git a/test/lib/ansible_test/_internal/init.py b/test/lib/ansible_test/_internal/init.py
new file mode 100644
index 00000000..682e6b0c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/init.py
@@ -0,0 +1,16 @@
+"""Early initialization for ansible-test before most other imports have been performed."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import resource
+
+from .constants import (
+ SOFT_RLIMIT_NOFILE,
+)
+
+CURRENT_RLIMIT_NOFILE = resource.getrlimit(resource.RLIMIT_NOFILE)
+DESIRED_RLIMIT_NOFILE = (SOFT_RLIMIT_NOFILE, CURRENT_RLIMIT_NOFILE[1])
+
+if DESIRED_RLIMIT_NOFILE < CURRENT_RLIMIT_NOFILE:
+ resource.setrlimit(resource.RLIMIT_NOFILE, DESIRED_RLIMIT_NOFILE)
+ CURRENT_RLIMIT_NOFILE = DESIRED_RLIMIT_NOFILE
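+
+# For example (illustrative), if SOFT_RLIMIT_NOFILE is 1024 and the current limits are (4096, 8192),
+# the soft limit is lowered to 1024 while the hard limit of 8192 is preserved.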
diff --git a/test/lib/ansible_test/_internal/integration/__init__.py b/test/lib/ansible_test/_internal/integration/__init__.py
new file mode 100644
index 00000000..f7be34e7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/integration/__init__.py
@@ -0,0 +1,349 @@
+"""Ansible integration test infrastructure."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import contextlib
+import json
+import os
+import shutil
+import tempfile
+
+from .. import types as t
+
+from ..encoding import (
+ to_bytes,
+)
+
+from ..target import (
+ analyze_integration_target_dependencies,
+ walk_integration_targets,
+)
+
+from ..config import (
+ IntegrationConfig,
+ NetworkIntegrationConfig,
+ PosixIntegrationConfig,
+ WindowsIntegrationConfig,
+)
+
+from ..io import (
+ make_dirs,
+ write_text_file,
+ read_text_file,
+)
+
+from ..util import (
+ ApplicationError,
+ display,
+ COVERAGE_CONFIG_NAME,
+ MODE_DIRECTORY,
+ MODE_DIRECTORY_WRITE,
+ MODE_FILE,
+)
+
+from ..util_common import (
+ named_temporary_file,
+ ResultType,
+)
+
+from ..coverage_util import (
+ generate_coverage_config,
+)
+
+from ..cache import (
+ CommonCache,
+)
+
+from ..cloud import (
+ CloudEnvironmentConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+def setup_common_temp_dir(args, path):
+ """
+ :type args: IntegrationConfig
+ :type path: str
+ """
+ if args.explain:
+ return
+
+ os.mkdir(path)
+ os.chmod(path, MODE_DIRECTORY)
+
+ if args.coverage:
+ coverage_config_path = os.path.join(path, COVERAGE_CONFIG_NAME)
+
+ coverage_config = generate_coverage_config(args)
+
+ write_text_file(coverage_config_path, coverage_config)
+
+ os.chmod(coverage_config_path, MODE_FILE)
+
+ coverage_output_path = os.path.join(path, ResultType.COVERAGE.name)
+
+ os.mkdir(coverage_output_path)
+ os.chmod(coverage_output_path, MODE_DIRECTORY_WRITE)
+
+
+def generate_dependency_map(integration_targets):
+ """
+ :type integration_targets: list[IntegrationTarget]
+ :rtype: dict[str, set[IntegrationTarget]]
+ """
+ targets_dict = dict((target.name, target) for target in integration_targets)
+ target_dependencies = analyze_integration_target_dependencies(integration_targets)
+ dependency_map = {}
+
+ invalid_targets = set()
+
+ for dependency, dependents in target_dependencies.items():
+ dependency_target = targets_dict.get(dependency)
+
+ if not dependency_target:
+ invalid_targets.add(dependency)
+ continue
+
+ for dependent in dependents:
+ if dependent not in dependency_map:
+ dependency_map[dependent] = set()
+
+ dependency_map[dependent].add(dependency_target)
+
+ if invalid_targets:
+ raise ApplicationError('Non-existent target dependencies: %s' % ', '.join(sorted(invalid_targets)))
+
+ return dependency_map
+
+
+def get_files_needed(target_dependencies):
+ """
+ :type target_dependencies: list[IntegrationTarget]
+ :rtype: list[str]
+ """
+ files_needed = []
+
+ for target_dependency in target_dependencies:
+ files_needed += target_dependency.needs_file
+
+ files_needed = sorted(set(files_needed))
+
+ invalid_paths = [path for path in files_needed if not os.path.isfile(path)]
+
+ if invalid_paths:
+ raise ApplicationError('Invalid "needs/file/*" aliases:\n%s' % '\n'.join(invalid_paths))
+
+ return files_needed
+
+
+def check_inventory(args, inventory_path): # type: (IntegrationConfig, str) -> None
+ """Check the given inventory for issues."""
+ if args.docker or args.remote:
+ if os.path.exists(inventory_path):
+ inventory = read_text_file(inventory_path)
+
+ if 'ansible_ssh_private_key_file' in inventory:
+ display.warning('Use of "ansible_ssh_private_key_file" in inventory with the --docker or --remote option is unsupported and will likely fail.')
+
+
+def get_inventory_relative_path(args): # type: (IntegrationConfig) -> str
+ """Return the inventory path used for the given integration configuration relative to the content root."""
+ inventory_names = {
+ PosixIntegrationConfig: 'inventory',
+ WindowsIntegrationConfig: 'inventory.winrm',
+ NetworkIntegrationConfig: 'inventory.networking',
+ } # type: t.Dict[t.Type[IntegrationConfig], str]
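+    # for example (illustrative), a WindowsIntegrationConfig resolves to "<integration_path>/inventory.winrm"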
+
+ return os.path.join(data_context().content.integration_path, inventory_names[type(args)])
+
+
+def delegate_inventory(args, inventory_path_src): # type: (IntegrationConfig, str) -> None
+ """Make the given inventory available during delegation."""
+ if isinstance(args, PosixIntegrationConfig):
+ return
+
+ def inventory_callback(files): # type: (t.List[t.Tuple[str, str]]) -> None
+ """
+ Add the inventory file to the payload file list.
+ This will preserve the file during delegation even if it is ignored or is outside the content and install roots.
+ """
+ inventory_path = get_inventory_relative_path(args)
+ inventory_tuple = inventory_path_src, inventory_path
+
+ if os.path.isfile(inventory_path_src) and inventory_tuple not in files:
+ originals = [item for item in files if item[1] == inventory_path]
+
+ if originals:
+ for original in originals:
+ files.remove(original)
+
+ display.warning('Overriding inventory file "%s" with "%s".' % (inventory_path, inventory_path_src))
+ else:
+ display.notice('Sourcing inventory file "%s" from "%s".' % (inventory_path, inventory_path_src))
+
+ files.append(inventory_tuple)
+
+ data_context().register_payload_callback(inventory_callback)
+
+
+@contextlib.contextmanager
+def integration_test_environment(args, target, inventory_path_src):
+ """
+ :type args: IntegrationConfig
+ :type target: IntegrationTarget
+ :type inventory_path_src: str
+ """
+ ansible_config_src = args.get_ansible_config()
+ ansible_config_relative = os.path.join(data_context().content.integration_path, '%s.cfg' % args.command)
+
+ if args.no_temp_workdir or 'no/temp_workdir/' in target.aliases:
+ display.warning('Disabling the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
+
+ integration_dir = os.path.join(data_context().content.root, data_context().content.integration_path)
+ targets_dir = os.path.join(data_context().content.root, data_context().content.integration_targets_path)
+ inventory_path = inventory_path_src
+ ansible_config = ansible_config_src
+ vars_file = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+
+ yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
+ return
+
+ # When testing a collection, the temporary directory must reside within the collection.
+ # This is necessary to enable support for the default collection for non-collection content (playbooks and roles).
+ root_temp_dir = os.path.join(ResultType.TMP.path, 'integration')
+
+ prefix = '%s-' % target.name
+ suffix = u'-\u00c5\u00d1\u015a\u00cc\u03b2\u0141\u00c8'
+
+ if args.no_temp_unicode or 'no/temp_unicode/' in target.aliases:
+ display.warning('Disabling unicode in the temp work dir is a temporary debugging feature that may be removed in the future without notice.')
+ suffix = '-ansible'
+
+ if args.explain:
+ temp_dir = os.path.join(root_temp_dir, '%stemp%s' % (prefix, suffix))
+ else:
+ make_dirs(root_temp_dir)
+ temp_dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
+
+ try:
+ display.info('Preparing temporary directory: %s' % temp_dir, verbosity=2)
+
+ inventory_relative_path = get_inventory_relative_path(args)
+ inventory_path = os.path.join(temp_dir, inventory_relative_path)
+
+ cache = IntegrationCache(args)
+
+ target_dependencies = sorted([target] + list(cache.dependency_map.get(target.name, set())))
+
+ files_needed = get_files_needed(target_dependencies)
+
+ integration_dir = os.path.join(temp_dir, data_context().content.integration_path)
+ targets_dir = os.path.join(temp_dir, data_context().content.integration_targets_path)
+ ansible_config = os.path.join(temp_dir, ansible_config_relative)
+
+ vars_file_src = os.path.join(data_context().content.root, data_context().content.integration_vars_path)
+ vars_file = os.path.join(temp_dir, data_context().content.integration_vars_path)
+
+ file_copies = [
+ (ansible_config_src, ansible_config),
+ (inventory_path_src, inventory_path),
+ ]
+
+ if os.path.exists(vars_file_src):
+ file_copies.append((vars_file_src, vars_file))
+
+ file_copies += [(path, os.path.join(temp_dir, path)) for path in files_needed]
+
+ integration_targets_relative_path = data_context().content.integration_targets_path
+
+ directory_copies = [
+ (
+ os.path.join(integration_targets_relative_path, target.relative_path),
+ os.path.join(temp_dir, integration_targets_relative_path, target.relative_path)
+ )
+ for target in target_dependencies
+ ]
+
+ directory_copies = sorted(set(directory_copies))
+ file_copies = sorted(set(file_copies))
+
+ if not args.explain:
+ make_dirs(integration_dir)
+
+ for dir_src, dir_dst in directory_copies:
+ display.info('Copying %s/ to %s/' % (dir_src, dir_dst), verbosity=2)
+
+ if not args.explain:
+ shutil.copytree(to_bytes(dir_src), to_bytes(dir_dst), symlinks=True)
+
+ for file_src, file_dst in file_copies:
+ display.info('Copying %s to %s' % (file_src, file_dst), verbosity=2)
+
+ if not args.explain:
+ make_dirs(os.path.dirname(file_dst))
+ shutil.copy2(file_src, file_dst)
+
+ yield IntegrationEnvironment(integration_dir, targets_dir, inventory_path, ansible_config, vars_file)
+ finally:
+ if not args.explain:
+ shutil.rmtree(temp_dir)
+
+
+@contextlib.contextmanager
+def integration_test_config_file(args, env_config, integration_dir):
+ """
+ :type args: IntegrationConfig
+ :type env_config: CloudEnvironmentConfig
+ :type integration_dir: str
+ """
+ if not env_config:
+ yield None
+ return
+
+ config_vars = (env_config.ansible_vars or {}).copy()
+
+ config_vars.update(dict(
+ ansible_test=dict(
+ environment=env_config.env_vars,
+ module_defaults=env_config.module_defaults,
+ )
+ ))
+
+ config_file = json.dumps(config_vars, indent=4, sort_keys=True)
+
+ with named_temporary_file(args, 'config-file-', '.json', integration_dir, config_file) as path:
+ filename = os.path.relpath(path, integration_dir)
+
+ display.info('>>> Config File: %s\n%s' % (filename, config_file), verbosity=3)
+
+ yield path
+
+
+class IntegrationEnvironment:
+ """Details about the integration environment."""
+ def __init__(self, integration_dir, targets_dir, inventory_path, ansible_config, vars_file):
+ self.integration_dir = integration_dir
+ self.targets_dir = targets_dir
+ self.inventory_path = inventory_path
+ self.ansible_config = ansible_config
+ self.vars_file = vars_file
+
+
+class IntegrationCache(CommonCache):
+ """Integration cache."""
+ @property
+ def integration_targets(self):
+ """
+ :rtype: list[IntegrationTarget]
+ """
+ return self.get('integration_targets', lambda: list(walk_integration_targets()))
+
+ @property
+ def dependency_map(self):
+ """
+ :rtype: dict[str, set[IntegrationTarget]]
+ """
+ return self.get('dependency_map', lambda: generate_dependency_map(self.integration_targets))
diff --git a/test/lib/ansible_test/_internal/io.py b/test/lib/ansible_test/_internal/io.py
new file mode 100644
index 00000000..0f61cd2d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/io.py
@@ -0,0 +1,94 @@
+"""Functions for disk IO."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import io
+import json
+import os
+
+from . import types as t
+
+from .encoding import (
+ ENCODING,
+ to_bytes,
+ to_text,
+)
+
+
+def read_json_file(path): # type: (t.AnyStr) -> t.Any
+ """Parse and return the json content from the specified path."""
+ return json.loads(read_text_file(path))
+
+
+def read_text_file(path): # type: (t.AnyStr) -> t.Text
+ """Return the contents of the specified path as text."""
+ return to_text(read_binary_file(path))
+
+
+def read_binary_file(path): # type: (t.AnyStr) -> bytes
+ """Return the contents of the specified path as bytes."""
+ with open_binary_file(path) as file:
+ return file.read()
+
+
+def make_dirs(path): # type: (str) -> None
+ """Create a directory at path, including any necessary parent directories."""
+ try:
+ os.makedirs(to_bytes(path))
+ except OSError as ex:
+ if ex.errno != errno.EEXIST:
+ raise
+
+
+def write_json_file(path, # type: str
+ content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]]
+ create_directories=False, # type: bool
+ formatted=True, # type: bool
+ encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]]
+ ): # type: (...) -> None
+ """Write the given json content to the specified path, optionally creating missing directories."""
+ text_content = json.dumps(content,
+ sort_keys=formatted,
+ indent=4 if formatted else None,
+ separators=(', ', ': ') if formatted else (',', ':'),
+ cls=encoder,
+ ) + '\n'
+
+ write_text_file(path, text_content, create_directories=create_directories)
+
+
+def write_text_file(path, content, create_directories=False): # type: (str, str, bool) -> None
+ """Write the given text content to the specified path, optionally creating missing directories."""
+ if create_directories:
+ make_dirs(os.path.dirname(path))
+
+ with open_binary_file(path, 'wb') as file:
+ file.write(to_bytes(content))
+
+
+def open_text_file(path, mode='r'): # type: (str, str) -> t.TextIO
+ """Open the given path for text access."""
+ if 'b' in mode:
+ raise Exception('mode cannot include "b" for text files: %s' % mode)
+
+ # noinspection PyTypeChecker
+ return io.open(to_bytes(path), mode, encoding=ENCODING)
+
+
+def open_binary_file(path, mode='rb'): # type: (str, str) -> t.BinaryIO
+ """Open the given path for binary access."""
+ if 'b' not in mode:
+ raise Exception('mode must include "b" for binary files: %s' % mode)
+
+ # noinspection PyTypeChecker
+ return io.open(to_bytes(path), mode)
+
+
+class SortedSetEncoder(json.JSONEncoder):
+ """Encode sets as sorted lists."""
+ def default(self, obj): # pylint: disable=method-hidden, arguments-differ
+ if isinstance(obj, set):
+ return sorted(obj)
+
+        return super(SortedSetEncoder, self).default(obj)
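
Note: a quick illustration of SortedSetEncoder in use (a minimal standalone sketch; only the stdlib is assumed). The sorted() call makes the output deterministic:

    import json

    class SortedSetEncoder(json.JSONEncoder):
        """Encode sets as sorted lists."""
        def default(self, obj):
            if isinstance(obj, set):
                return sorted(obj)
            return super(SortedSetEncoder, self).default(obj)

    print(json.dumps(dict(tags={'b', 'c', 'a'}), cls=SortedSetEncoder))
    # -> {"tags": ["a", "b", "c"]}
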
diff --git a/test/lib/ansible_test/_internal/manage_ci.py b/test/lib/ansible_test/_internal/manage_ci.py
new file mode 100644
index 00000000..e81dad68
--- /dev/null
+++ b/test/lib/ansible_test/_internal/manage_ci.py
@@ -0,0 +1,335 @@
+"""Access Ansible Core CI remote services."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import tempfile
+import time
+
+from .util import (
+ SubprocessError,
+ ApplicationError,
+ cmd_quote,
+ display,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from .util_common import (
+ intercept_command,
+ get_network_settings,
+ run_command,
+)
+
+from .core_ci import (
+ AnsibleCoreCI,
+)
+
+from .ansible_util import (
+ ansible_environment,
+)
+
+from .config import (
+ ShellConfig,
+)
+
+from .payload import (
+ create_payload,
+)
+
+
+class ManageWindowsCI:
+ """Manage access to a Windows instance provided by Ansible Core CI."""
+ def __init__(self, core_ci):
+ """
+ :type core_ci: AnsibleCoreCI
+ """
+ self.core_ci = core_ci
+ self.ssh_args = ['-i', self.core_ci.ssh_key.key]
+
+ ssh_options = dict(
+ BatchMode='yes',
+ StrictHostKeyChecking='no',
+ UserKnownHostsFile='/dev/null',
+ ServerAliveInterval=15,
+ ServerAliveCountMax=4,
+ )
+
+ for ssh_option in sorted(ssh_options):
+ self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])]
+
+ def setup(self, python_version):
+ """Used in delegate_remote to setup the host, no action is required for Windows.
+ :type python_version: str
+ """
+
+ def wait(self):
+ """Wait for instance to respond to ansible ping."""
+ extra_vars = [
+ 'ansible_connection=winrm',
+ 'ansible_host=%s' % self.core_ci.connection.hostname,
+ 'ansible_user=%s' % self.core_ci.connection.username,
+ 'ansible_password=%s' % self.core_ci.connection.password,
+ 'ansible_port=%s' % self.core_ci.connection.port,
+ 'ansible_winrm_server_cert_validation=ignore',
+ ]
+
+ name = 'windows_%s' % self.core_ci.version
+
+ env = ansible_environment(self.core_ci.args)
+ cmd = ['ansible', '-m', 'ansible.windows.win_ping', '-i', '%s,' % name, name, '-e', ' '.join(extra_vars)]
+
+ for dummy in range(1, 120):
+ try:
+ intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True)
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
+
+ def download(self, remote, local):
+ """
+ :type remote: str
+ :type local: str
+ """
+ self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local)
+
+ def upload(self, local, remote):
+ """
+ :type local: str
+ :type remote: str
+ """
+ self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote))
+
+ def ssh(self, command, options=None, force_pty=True):
+ """
+ :type command: str | list[str]
+ :type options: list[str] | None
+ :type force_pty: bool
+ """
+ if not options:
+ options = []
+ if force_pty:
+ options.append('-tt')
+
+ if isinstance(command, list):
+ command = ' '.join(cmd_quote(c) for c in command)
+
+ run_command(self.core_ci.args,
+ ['ssh', '-q'] + self.ssh_args +
+ options +
+ ['-p', '22',
+ '%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] +
+ [command])
+
+ def scp(self, src, dst):
+ """
+ :type src: str
+ :type dst: str
+ """
+ for dummy in range(1, 10):
+ try:
+ run_command(self.core_ci.args,
+ ['scp'] + self.ssh_args +
+ ['-P', '22', '-q', '-r', src, dst])
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Failed transfer: %s -> %s' % (src, dst))
+
+
+class ManageNetworkCI:
+ """Manage access to a network instance provided by Ansible Core CI."""
+ def __init__(self, core_ci):
+ """
+ :type core_ci: AnsibleCoreCI
+ """
+ self.core_ci = core_ci
+
+ def wait(self):
+ """Wait for instance to respond to ansible ping."""
+ settings = get_network_settings(self.core_ci.args, self.core_ci.platform, self.core_ci.version)
+
+ extra_vars = [
+ 'ansible_host=%s' % self.core_ci.connection.hostname,
+ 'ansible_port=%s' % self.core_ci.connection.port,
+ 'ansible_ssh_private_key_file=%s' % self.core_ci.ssh_key.key,
+ ] + [
+ '%s=%s' % (key, value) for key, value in settings.inventory_vars.items()
+ ]
+
+ name = '%s-%s' % (self.core_ci.platform, self.core_ci.version.replace('.', '-'))
+
+ env = ansible_environment(self.core_ci.args)
+ cmd = [
+ 'ansible',
+ '-m', '%s%s_command' % (settings.collection + '.' if settings.collection else '', self.core_ci.platform),
+ '-a', 'commands=?',
+ '-u', self.core_ci.connection.username,
+ '-i', '%s,' % name,
+ '-e', ' '.join(extra_vars),
+ name,
+ ]
+
+ for dummy in range(1, 90):
+ try:
+ intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True)
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
+
+
+class ManagePosixCI:
+ """Manage access to a POSIX instance provided by Ansible Core CI."""
+ def __init__(self, core_ci):
+ """
+ :type core_ci: AnsibleCoreCI
+ """
+ self.core_ci = core_ci
+ self.ssh_args = ['-i', self.core_ci.ssh_key.key]
+
+ ssh_options = dict(
+ BatchMode='yes',
+ StrictHostKeyChecking='no',
+ UserKnownHostsFile='/dev/null',
+ ServerAliveInterval=15,
+ ServerAliveCountMax=4,
+ )
+
+ for ssh_option in sorted(ssh_options):
+ self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])]
+
+ if self.core_ci.platform == 'freebsd':
+ if self.core_ci.provider == 'aws':
+ self.become = ['su', '-l', 'root', '-c']
+ elif self.core_ci.provider == 'azure':
+ self.become = ['sudo', '-in', 'sh', '-c']
+ else:
+ raise NotImplementedError('provider %s has not been implemented' % self.core_ci.provider)
+ elif self.core_ci.platform == 'macos':
+ self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH', 'sh', '-c']
+ elif self.core_ci.platform == 'osx':
+ self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH']
+ elif self.core_ci.platform == 'rhel' or self.core_ci.platform == 'centos':
+ self.become = ['sudo', '-in', 'bash', '-c']
+ elif self.core_ci.platform in ['aix', 'ibmi']:
+ self.become = []
+
+ def setup(self, python_version):
+ """Start instance and wait for it to become ready and respond to an ansible ping.
+ :type python_version: str
+ :rtype: str
+ """
+ pwd = self.wait()
+
+ display.info('Remote working directory: %s' % pwd, verbosity=1)
+
+ if isinstance(self.core_ci.args, ShellConfig):
+ if self.core_ci.args.raw:
+ return pwd
+
+ self.configure(python_version)
+ self.upload_source()
+
+ return pwd
+
+ def wait(self): # type: () -> str
+ """Wait for instance to respond to SSH."""
+ for dummy in range(1, 90):
+ try:
+ stdout = self.ssh('pwd', capture=True)[0]
+
+ if self.core_ci.args.explain:
+ return '/pwd'
+
+ pwd = stdout.strip().splitlines()[-1]
+
+ if not pwd.startswith('/'):
+ raise Exception('Unexpected current working directory "%s" from "pwd" command output:\n%s' % (pwd, stdout))
+
+ return pwd
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
+ (self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
+
+ def configure(self, python_version):
+ """Configure remote host for testing.
+ :type python_version: str
+ """
+ self.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'remote.sh'), '/tmp')
+ self.ssh('chmod +x /tmp/remote.sh && /tmp/remote.sh %s %s' % (self.core_ci.platform, python_version))
+
+ def upload_source(self):
+ """Upload and extract source."""
+ with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd:
+ remote_source_dir = '/tmp'
+ remote_source_path = os.path.join(remote_source_dir, os.path.basename(local_source_fd.name))
+
+ create_payload(self.core_ci.args, local_source_fd.name)
+
+ self.upload(local_source_fd.name, remote_source_dir)
+ # AIX does not provide the GNU tar version, leading to parameters
+ # being different and -z not being recognized. This pattern works
+ # with both versions of tar.
+ self.ssh(
+ 'rm -rf ~/ansible ~/ansible_collections && cd ~/ && gunzip --stdout %s | tar oxf - && rm %s' %
+ (remote_source_path, remote_source_path)
+ )
+
+ def download(self, remote, local):
+ """
+ :type remote: str
+ :type local: str
+ """
+ self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local)
+
+ def upload(self, local, remote):
+ """
+ :type local: str
+ :type remote: str
+ """
+ self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote))
+
+ def ssh(self, command, options=None, capture=False):
+ """
+ :type command: str | list[str]
+ :type options: list[str] | None
+ :type capture: bool
+ :rtype: str | None, str | None
+ """
+ if not options:
+ options = []
+
+ if isinstance(command, list):
+ command = ' '.join(cmd_quote(c) for c in command)
+
+ command = cmd_quote(command) if self.become else command
+ return run_command(self.core_ci.args,
+ ['ssh', '-tt', '-q'] + self.ssh_args +
+ options +
+ ['-p', str(self.core_ci.connection.port),
+ '%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] +
+ self.become + [command], capture=capture)
+
+ def scp(self, src, dst):
+ """
+ :type src: str
+ :type dst: str
+ """
+ for dummy in range(1, 10):
+ try:
+ run_command(self.core_ci.args,
+ ['scp'] + self.ssh_args +
+ ['-P', str(self.core_ci.connection.port), '-q', '-r', src, dst])
+ return
+ except SubprocessError:
+ time.sleep(10)
+
+ raise ApplicationError('Failed transfer: %s -> %s' % (src, dst))
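
Note: ManageWindowsCI and ManagePosixCI build their SSH command lines from the same option dictionary. A standalone sketch of that expansion (the private key path is hypothetical):

    ssh_options = dict(
        BatchMode='yes',
        StrictHostKeyChecking='no',
        UserKnownHostsFile='/dev/null',
        ServerAliveInterval=15,
        ServerAliveCountMax=4,
    )

    ssh_args = ['-i', '/tmp/example-key']  # hypothetical private key path

    for ssh_option in sorted(ssh_options):
        ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])]

    print(' '.join(ssh_args))
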
diff --git a/test/lib/ansible_test/_internal/metadata.py b/test/lib/ansible_test/_internal/metadata.py
new file mode 100644
index 00000000..36575d0c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/metadata.py
@@ -0,0 +1,151 @@
+"""Test metadata for passing data to delegated tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from . import types as t
+
+from .util import (
+ display,
+)
+
+from .io import (
+ write_json_file,
+ read_json_file,
+)
+
+from .diff import (
+ parse_diff,
+ FileDiff,
+)
+
+
+class Metadata:
+ """Metadata object for passing data to delegated tests."""
+ def __init__(self):
+ """Initialize metadata."""
+ self.changes = {} # type: t.Dict[str, t.Tuple[t.Tuple[int, int]]]
+ self.cloud_config = None # type: t.Optional[t.Dict[str, str]]
+ self.instance_config = None # type: t.Optional[t.List[t.Dict[str, str]]]
+ self.change_description = None # type: t.Optional[ChangeDescription]
+ self.ci_provider = None # type: t.Optional[str]
+
+ def populate_changes(self, diff):
+ """
+ :type diff: list[str] | None
+ """
+ patches = parse_diff(diff)
+ patches = sorted(patches, key=lambda k: k.new.path) # type: t.List[FileDiff]
+
+ self.changes = dict((patch.new.path, tuple(patch.new.ranges)) for patch in patches)
+
+ renames = [patch.old.path for patch in patches if patch.old.path != patch.new.path and patch.old.exists and patch.new.exists]
+ deletes = [patch.old.path for patch in patches if not patch.new.exists]
+
+ # make sure old paths which were renamed or deleted are registered in changes
+ for path in renames + deletes:
+ if path in self.changes:
+ # old path was replaced with another file
+ continue
+
+ # failed tests involving deleted files should be using line 0 since there is no content remaining
+ self.changes[path] = ((0, 0),)
+
+ def to_dict(self):
+ """
+ :rtype: dict[str, any]
+ """
+ return dict(
+ changes=self.changes,
+ cloud_config=self.cloud_config,
+ instance_config=self.instance_config,
+ ci_provider=self.ci_provider,
+ change_description=self.change_description.to_dict(),
+ )
+
+ def to_file(self, path):
+ """
+        :type path: str
+ """
+ data = self.to_dict()
+
+ display.info('>>> Metadata: %s\n%s' % (path, data), verbosity=3)
+
+ write_json_file(path, data)
+
+ @staticmethod
+ def from_file(path):
+ """
+ :type path: str
+ :rtype: Metadata
+ """
+ data = read_json_file(path)
+ return Metadata.from_dict(data)
+
+ @staticmethod
+ def from_dict(data):
+ """
+ :type data: dict[str, any]
+ :rtype: Metadata
+ """
+ metadata = Metadata()
+ metadata.changes = data['changes']
+ metadata.cloud_config = data['cloud_config']
+ metadata.instance_config = data['instance_config']
+ metadata.ci_provider = data['ci_provider']
+ metadata.change_description = ChangeDescription.from_dict(data['change_description'])
+
+ return metadata
+
+
+class ChangeDescription:
+ """Description of changes."""
+ def __init__(self):
+ self.command = '' # type: str
+ self.changed_paths = [] # type: t.List[str]
+ self.deleted_paths = [] # type: t.List[str]
+ self.regular_command_targets = {} # type: t.Dict[str, t.List[str]]
+ self.focused_command_targets = {} # type: t.Dict[str, t.List[str]]
+ self.no_integration_paths = [] # type: t.List[str]
+
+ @property
+ def targets(self):
+ """
+ :rtype: list[str] | None
+ """
+ return self.regular_command_targets.get(self.command)
+
+ @property
+ def focused_targets(self):
+ """
+ :rtype: list[str] | None
+ """
+ return self.focused_command_targets.get(self.command)
+
+ def to_dict(self):
+ """
+ :rtype: dict[str, any]
+ """
+ return dict(
+ command=self.command,
+ changed_paths=self.changed_paths,
+ deleted_paths=self.deleted_paths,
+ regular_command_targets=self.regular_command_targets,
+ focused_command_targets=self.focused_command_targets,
+ no_integration_paths=self.no_integration_paths,
+ )
+
+ @staticmethod
+ def from_dict(data):
+ """
+ :param data: dict[str, any]
+ :rtype: ChangeDescription
+ """
+ changes = ChangeDescription()
+ changes.command = data['command']
+ changes.changed_paths = data['changed_paths']
+ changes.deleted_paths = data['deleted_paths']
+ changes.regular_command_targets = data['regular_command_targets']
+ changes.focused_command_targets = data['focused_command_targets']
+ changes.no_integration_paths = data['no_integration_paths']
+
+ return changes
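
Note: Metadata.to_file() and Metadata.from_file() are symmetric, so the serialized form round-trips through JSON (tuples in changes come back as lists, which from_dict() stores as-is). A sketch with a hypothetical payload:

    import json

    # Hypothetical content of a metadata.json file produced by Metadata.to_file().
    data = dict(
        changes={'lib/ansible/cli/adhoc.py': [[14, 21]]},
        cloud_config=None,
        instance_config=None,
        ci_provider='shippable',
        change_description=dict(
            command='units',
            changed_paths=['lib/ansible/cli/adhoc.py'],
            deleted_paths=[],
            regular_command_targets={'units': ['lib/ansible/cli/adhoc.py']},
            focused_command_targets={},
            no_integration_paths=[],
        ),
    )

    assert json.loads(json.dumps(data)) == data
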
diff --git a/test/lib/ansible_test/_internal/payload.py b/test/lib/ansible_test/_internal/payload.py
new file mode 100644
index 00000000..161faba0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/payload.py
@@ -0,0 +1,146 @@
+"""Payload management for sending Ansible files and test content to other systems (VMs, containers)."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import os
+import stat
+import tarfile
+import tempfile
+import time
+
+from . import types as t
+
+from .config import (
+ IntegrationConfig,
+ ShellConfig,
+)
+
+from .util import (
+ display,
+ ANSIBLE_SOURCE_ROOT,
+ remove_tree,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+)
+
+from .util_common import (
+ CommonConfig,
+)
+
+# improve performance by disabling uid/gid lookups
+tarfile.pwd = None
+tarfile.grp = None
+
+# this bin symlink map must exactly match the contents of the bin directory
+# it is necessary for payload creation to reconstruct the bin directory when running ansible-test from an installed version of ansible
+ANSIBLE_BIN_SYMLINK_MAP = {
+ 'ansible': '../lib/ansible/cli/scripts/ansible_cli_stub.py',
+ 'ansible-config': 'ansible',
+ 'ansible-connection': '../lib/ansible/cli/scripts/ansible_connection_cli_stub.py',
+ 'ansible-console': 'ansible',
+ 'ansible-doc': 'ansible',
+ 'ansible-galaxy': 'ansible',
+ 'ansible-inventory': 'ansible',
+ 'ansible-playbook': 'ansible',
+ 'ansible-pull': 'ansible',
+ 'ansible-test': '../test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py',
+ 'ansible-vault': 'ansible',
+}
+
+
+def create_payload(args, dst_path): # type: (CommonConfig, str) -> None
+ """Create a payload for delegation."""
+ if args.explain:
+ return
+
+ files = list(data_context().ansible_source)
+ filters = {}
+
+ def make_executable(tar_info): # type: (tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]
+ """Make the given file executable."""
+ tar_info.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP
+ return tar_info
+
+ if not ANSIBLE_SOURCE_ROOT:
+ # reconstruct the bin directory which is not available when running from an ansible install
+ files.extend(create_temporary_bin_files(args))
+ filters.update(dict((os.path.join('ansible', path[3:]), make_executable) for path in ANSIBLE_BIN_SYMLINK_MAP.values() if path.startswith('../')))
+
+ if not data_context().content.is_ansible:
+ # exclude unnecessary files when not testing ansible itself
+ files = [f for f in files if
+ is_subdir(f[1], 'bin/') or
+ is_subdir(f[1], 'lib/ansible/') or
+ is_subdir(f[1], 'test/lib/ansible_test/')]
+
+        if not isinstance(args, (ShellConfig, IntegrationConfig)):
+            # exclude built-in ansible modules when they are not needed
+            files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py']
+
+        collection_layouts = data_context().create_collection_layouts()
+
+        content_files = []
+        extra_files = []
+
+        for layout in collection_layouts:
+            if layout == data_context().content:
+                # include files from the current collection (layout.collection.directory will be added later)
+                content_files.extend((os.path.join(layout.root, path), path) for path in data_context().content.all_files())
+            else:
+                # include files from each collection in the same collection root as the content being tested
+                extra_files.extend((os.path.join(layout.root, path), os.path.join(layout.collection.directory, path)) for path in layout.all_files())
+ else:
+ # when testing ansible itself the ansible source is the content
+ content_files = files
+ # there are no extra files when testing ansible itself
+ extra_files = []
+
+ for callback in data_context().payload_callbacks:
+ # execute callbacks only on the content paths
+ # this is done before placing them in the appropriate subdirectory (see below)
+ callback(content_files)
+
+ # place ansible source files under the 'ansible' directory on the delegated host
+ files = [(src, os.path.join('ansible', dst)) for src, dst in files]
+
+ if data_context().content.collection:
+ # place collection files under the 'ansible_collections/{namespace}/{collection}' directory on the delegated host
+ files.extend((src, os.path.join(data_context().content.collection.directory, dst)) for src, dst in content_files)
+ # extra files already have the correct destination path
+ files.extend(extra_files)
+
+ # maintain predictable file order
+ files = sorted(set(files))
+
+ display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1)
+
+ start = time.time()
+
+ with tarfile.TarFile.open(dst_path, mode='w:gz', compresslevel=4, format=tarfile.GNU_FORMAT) as tar:
+ for src, dst in files:
+ display.info('%s -> %s' % (src, dst), verbosity=4)
+ tar.add(src, dst, filter=filters.get(dst))
+
+ duration = time.time() - start
+ payload_size_bytes = os.path.getsize(dst_path)
+
+ display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1)
+
+
+def create_temporary_bin_files(args): # type: (CommonConfig) -> t.Tuple[t.Tuple[str, str], ...]
+ """Create a temporary ansible bin directory populated using the symlink map."""
+ if args.explain:
+ temp_path = '/tmp/ansible-tmp-bin'
+ else:
+ temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin')
+ atexit.register(remove_tree, temp_path)
+
+ for name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
+ path = os.path.join(temp_path, name)
+ os.symlink(dest, path)
+
+ return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in sorted(ANSIBLE_BIN_SYMLINK_MAP))
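
Note: the filters mapping in create_payload() hooks into tarfile's per-member filter callback to mark the reconstructed bin stubs executable. A standalone sketch of that mechanism (the archive member name is hypothetical):

    import io
    import stat
    import tarfile
    import tempfile

    def make_executable(tar_info):  # same filter shape as in create_payload()
        tar_info.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP
        return tar_info

    with tempfile.NamedTemporaryFile() as src:  # hypothetical stand-in for a bin stub
        archive = io.BytesIO()

        with tarfile.TarFile.open(fileobj=archive, mode='w:gz', format=tarfile.GNU_FORMAT) as tar:
            tar.add(src.name, 'ansible/bin/ansible-test', filter=make_executable)
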
diff --git a/test/lib/ansible_test/_internal/powershell_import_analysis.py b/test/lib/ansible_test/_internal/powershell_import_analysis.py
new file mode 100644
index 00000000..cfc61859
--- /dev/null
+++ b/test/lib/ansible_test/_internal/powershell_import_analysis.py
@@ -0,0 +1,105 @@
+"""Analyze powershell import statements."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+
+from .io import (
+ read_text_file,
+)
+
+from .util import (
+ display,
+)
+
+from .util_common import (
+ resolve_csharp_ps_util,
+)
+
+from .data import (
+ data_context,
+)
+
+
+def get_powershell_module_utils_imports(powershell_targets):
+ """Return a dictionary of module_utils names mapped to sets of powershell file paths.
+ :type powershell_targets: list[TestTarget]
+ :rtype: dict[str, set[str]]
+ """
+
+ module_utils = enumerate_module_utils()
+
+ imports_by_target_path = {}
+
+ for target in powershell_targets:
+ imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
+
+ imports = dict([(module_util, set()) for module_util in module_utils])
+
+ for target_path in imports_by_target_path:
+ for module_util in imports_by_target_path[target_path]:
+ imports[module_util].add(target_path)
+
+ for module_util in sorted(imports):
+ if not imports[module_util]:
+ display.warning('No imports found which use the "%s" module_util.' % module_util)
+
+ return imports
+
+
+def get_powershell_module_utils_name(path): # type: (str) -> str
+ """Return a namespace and name from the given module_utils path."""
+ base_path = data_context().content.module_utils_powershell_path
+
+ if data_context().content.collection:
+ prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
+ else:
+ prefix = ''
+
+ name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
+
+ return name
+
+
+def enumerate_module_utils():
+ """Return a list of available module_utils imports.
+ :rtype: set[str]
+ """
+ return set(get_powershell_module_utils_name(p)
+ for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path)
+ if os.path.splitext(p)[1] == '.psm1')
+
+
+def extract_powershell_module_utils_imports(path, module_utils):
+ """Return a list of module_utils imports found in the specified source file.
+ :type path: str
+ :type module_utils: set[str]
+ :rtype: set[str]
+ """
+ imports = set()
+
+ code = read_text_file(path)
+
+ if data_context().content.is_ansible and '# POWERSHELL_COMMON' in code:
+ imports.add('Ansible.ModuleUtils.Legacy')
+
+ lines = code.splitlines()
+ line_number = 0
+
+ for line in lines:
+ line_number += 1
+ match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line)
+
+ if not match:
+ continue
+
+ import_name = resolve_csharp_ps_util(match.group(1), path)
+
+ if import_name in module_utils:
+ imports.add(import_name)
+ elif data_context().content.is_ansible or \
+ import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
+ display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
+
+ return imports
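
Note: the requires-statement regex in extract_powershell_module_utils_imports() can be exercised on its own. A minimal sketch against hypothetical PowerShell source lines:

    import re

    PATTERN = r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)'

    lines = [
        '#Requires -Module Ansible.ModuleUtils.Legacy',
        '#AnsibleRequires -PowerShell ansible_collections.ns.col.plugins.module_utils.helper',
        'Import-Module SomethingElse',  # not an ansible-test requires statement
    ]

    for line in lines:
        match = re.search(PATTERN, line)

        if match:
            print(match.group(1))
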
diff --git a/test/lib/ansible_test/_internal/provider/__init__.py b/test/lib/ansible_test/_internal/provider/__init__.py
new file mode 100644
index 00000000..6e034b53
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/__init__.py
@@ -0,0 +1,78 @@
+"""Provider (plugin) infrastructure for ansible-test."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import os
+
+from .. import types as t
+
+from ..util import (
+ ABC,
+ ApplicationError,
+ get_subclasses,
+)
+
+
+try:
+ TPathProvider = t.TypeVar('TPathProvider', bound='PathProvider')
+except AttributeError:
+ TPathProvider = None # pylint: disable=invalid-name
+
+
+def get_path_provider_classes(provider_type): # type: (t.Type[TPathProvider]) -> t.List[t.Type[TPathProvider]]
+ """Return a list of path provider classes of the given type."""
+ return sorted(get_subclasses(provider_type), key=lambda c: (c.priority, c.__name__))
+
+
+def find_path_provider(provider_type, # type: t.Type[TPathProvider]
+ provider_classes, # type: t.List[t.Type[TPathProvider]]
+ path, # type: str
+ walk, # type: bool
+ ): # type: (...) -> TPathProvider
+ """Return the first found path provider of the given type for the given path."""
+ sequences = sorted(set(pc.sequence for pc in provider_classes if pc.sequence > 0))
+
+ for sequence in sequences:
+ candidate_path = path
+ tier_classes = [pc for pc in provider_classes if pc.sequence == sequence]
+
+ while True:
+ for provider_class in tier_classes:
+ if provider_class.is_content_root(candidate_path):
+ return provider_class(candidate_path)
+
+ if not walk:
+ break
+
+ parent_path = os.path.dirname(candidate_path)
+
+ if parent_path == candidate_path:
+ break
+
+ candidate_path = parent_path
+
+ raise ProviderNotFoundForPath(provider_type, path)
+
+
+class ProviderNotFoundForPath(ApplicationError):
+ """Exception generated when a path based provider cannot be found for a given path."""
+ def __init__(self, provider_type, path): # type: (t.Type, str) -> None
+ super(ProviderNotFoundForPath, self).__init__('No %s found for path: %s' % (provider_type.__name__, path))
+
+ self.provider_type = provider_type
+ self.path = path
+
+
+class PathProvider(ABC):
+ """Base class for provider plugins that are path based."""
+ sequence = 500
+ priority = 500
+
+ def __init__(self, root): # type: (str) -> None
+ self.root = root
+
+ @staticmethod
+ @abc.abstractmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
diff --git a/test/lib/ansible_test/_internal/provider/layout/__init__.py b/test/lib/ansible_test/_internal/provider/layout/__init__.py
new file mode 100644
index 00000000..03d596fc
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/__init__.py
@@ -0,0 +1,232 @@
+"""Code for finding content."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import collections
+import os
+
+from ... import types as t
+
+from ...util import (
+ ANSIBLE_SOURCE_ROOT,
+)
+
+from .. import (
+ PathProvider,
+)
+
+
+class Layout:
+ """Description of content locations and helper methods to access content."""
+ def __init__(self,
+ root, # type: str
+ paths, # type: t.List[str]
+ ): # type: (...) -> None
+ self.root = root
+
+ self.__paths = paths # contains both file paths and symlinked directory paths (ending with os.path.sep)
+ self.__files = [path for path in paths if not path.endswith(os.path.sep)] # contains only file paths
+ self.__paths_tree = paths_to_tree(self.__paths)
+ self.__files_tree = paths_to_tree(self.__files)
+
+ def all_files(self, include_symlinked_directories=False): # type: (bool) -> t.List[str]
+ """Return a list of all file paths."""
+ if include_symlinked_directories:
+ return self.__paths
+
+ return self.__files
+
+ def walk_files(self, directory, include_symlinked_directories=False): # type: (str, bool) -> t.List[str]
+ """Return a list of file paths found recursively under the given directory."""
+ if include_symlinked_directories:
+ tree = self.__paths_tree
+ else:
+ tree = self.__files_tree
+
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(tree, parts)
+
+ if not item:
+ return []
+
+ directories = collections.deque(item[0].values())
+
+ files = list(item[1])
+
+ while directories:
+ item = directories.pop()
+ directories.extend(item[0].values())
+ files.extend(item[1])
+
+ return files
+
+ def get_dirs(self, directory): # type: (str) -> t.List[str]
+ """Return a list directory paths found directly under the given directory."""
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(self.__files_tree, parts)
+ return [os.path.join(directory, key) for key in item[0].keys()] if item else []
+
+ def get_files(self, directory): # type: (str) -> t.List[str]
+ """Return a list of file paths found directly under the given directory."""
+ parts = directory.rstrip(os.path.sep).split(os.path.sep)
+ item = get_tree_item(self.__files_tree, parts)
+ return item[1] if item else []
+
+
+class ContentLayout(Layout):
+ """Information about the current Ansible content being tested."""
+ def __init__(self,
+ root, # type: str
+ paths, # type: t.List[str]
+ plugin_paths, # type: t.Dict[str, str]
+ collection, # type: t.Optional[CollectionDetail]
+ test_path, # type: str
+ results_path, # type: str
+ sanity_path, # type: str
+ sanity_messages, # type: t.Optional[LayoutMessages]
+ integration_path, # type: str
+ integration_targets_path, # type: str
+ integration_vars_path, # type: str
+ integration_messages, # type: t.Optional[LayoutMessages]
+ unit_path, # type: str
+ unit_module_path, # type: str
+ unit_module_utils_path, # type: str
+ unit_messages, # type: t.Optional[LayoutMessages]
+ ): # type: (...) -> None
+ super(ContentLayout, self).__init__(root, paths)
+
+ self.plugin_paths = plugin_paths
+ self.collection = collection
+ self.test_path = test_path
+ self.results_path = results_path
+ self.sanity_path = sanity_path
+ self.sanity_messages = sanity_messages
+ self.integration_path = integration_path
+ self.integration_targets_path = integration_targets_path
+ self.integration_vars_path = integration_vars_path
+ self.integration_messages = integration_messages
+ self.unit_path = unit_path
+ self.unit_module_path = unit_module_path
+ self.unit_module_utils_path = unit_module_utils_path
+ self.unit_messages = unit_messages
+
+ self.is_ansible = root == ANSIBLE_SOURCE_ROOT
+
+ @property
+ def prefix(self): # type: () -> str
+ """Return the collection prefix or an empty string if not a collection."""
+ if self.collection:
+ return self.collection.prefix
+
+ return ''
+
+ @property
+ def module_path(self): # type: () -> t.Optional[str]
+ """Return the path where modules are found, if any."""
+ return self.plugin_paths.get('modules')
+
+ @property
+ def module_utils_path(self): # type: () -> t.Optional[str]
+ """Return the path where module_utils are found, if any."""
+ return self.plugin_paths.get('module_utils')
+
+ @property
+ def module_utils_powershell_path(self): # type: () -> t.Optional[str]
+ """Return the path where powershell module_utils are found, if any."""
+ if self.is_ansible:
+ return os.path.join(self.plugin_paths['module_utils'], 'powershell')
+
+ return self.plugin_paths.get('module_utils')
+
+ @property
+ def module_utils_csharp_path(self): # type: () -> t.Optional[str]
+ """Return the path where csharp module_utils are found, if any."""
+ if self.is_ansible:
+ return os.path.join(self.plugin_paths['module_utils'], 'csharp')
+
+ return self.plugin_paths.get('module_utils')
+
+
+class LayoutMessages:
+ """Messages generated during layout creation that should be deferred for later display."""
+ def __init__(self):
+ self.info = [] # type: t.List[str]
+ self.warning = [] # type: t.List[str]
+ self.error = [] # type: t.List[str]
+
+
+class CollectionDetail:
+ """Details about the layout of the current collection."""
+ def __init__(self,
+ name, # type: str
+ namespace, # type: str
+ root, # type: str
+ ): # type: (...) -> None
+ self.name = name
+ self.namespace = namespace
+ self.root = root
+ self.full_name = '%s.%s' % (namespace, name)
+ self.prefix = '%s.' % self.full_name
+ self.directory = os.path.join('ansible_collections', namespace, name)
+
+
+class LayoutProvider(PathProvider):
+ """Base class for layout providers."""
+ PLUGIN_TYPES = (
+ 'action',
+ 'become',
+ 'cache',
+ 'callback',
+ 'cliconf',
+ 'connection',
+ 'doc_fragments',
+ 'filter',
+ 'httpapi',
+ 'inventory',
+ 'lookup',
+ 'module_utils',
+ 'modules',
+ 'netconf',
+ 'shell',
+ 'strategy',
+ 'terminal',
+ 'test',
+ 'vars',
+ )
+
+ @abc.abstractmethod
+ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
+ """Create a layout using the given root and paths."""
+
+
+def paths_to_tree(paths): # type: (t.List[str]) -> t.Tuple[t.Dict[str, t.Any], t.List[str]]
+ """Return a filesystem tree from the given list of paths."""
+ tree = {}, []
+
+ for path in paths:
+ parts = path.split(os.path.sep)
+ root = tree
+
+ for part in parts[:-1]:
+ if part not in root[0]:
+ root[0][part] = {}, []
+
+ root = root[0][part]
+
+ root[1].append(path)
+
+ return tree
+
+
+def get_tree_item(tree, parts): # type: (t.Tuple[t.Dict[str, t.Any], t.List[str]], t.List[str]) -> t.Optional[t.Tuple[t.Dict[str, t.Any], t.List[str]]]
+ """Return the portion of the tree found under the path given by parts, or None if it does not exist."""
+ root = tree
+
+ for part in parts:
+ root = root[0].get(part)
+
+ if not root:
+ return None
+
+ return root
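
Note: to make the (directories, files) tuple structure concrete, here is paths_to_tree() applied to a small hypothetical path list (POSIX separators assumed):

    import os

    def paths_to_tree(paths):  # same construction as in the layout module above
        tree = {}, []

        for path in paths:
            parts = path.split(os.path.sep)
            root = tree

            for part in parts[:-1]:
                if part not in root[0]:
                    root[0][part] = {}, []

                root = root[0][part]

            root[1].append(path)

        return tree

    tree = paths_to_tree(['lib/ansible/__init__.py', 'lib/ansible/modules/ping.py'])
    print(tree[0]['lib'][0]['ansible'][1])  # -> ['lib/ansible/__init__.py']
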
diff --git a/test/lib/ansible_test/_internal/provider/layout/ansible.py b/test/lib/ansible_test/_internal/provider/layout/ansible.py
new file mode 100644
index 00000000..49ca482b
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/ansible.py
@@ -0,0 +1,47 @@
+"""Layout provider for Ansible source."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from . import (
+ ContentLayout,
+ LayoutProvider,
+)
+
+
+class AnsibleLayout(LayoutProvider):
+ """Layout provider for Ansible source."""
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return os.path.exists(os.path.join(path, 'setup.py')) and os.path.exists(os.path.join(path, 'bin/ansible-test'))
+
+ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
+ """Create a Layout using the given root and paths."""
+ plugin_paths = dict((p, os.path.join('lib/ansible/plugins', p)) for p in self.PLUGIN_TYPES)
+
+ plugin_paths.update(dict(
+ modules='lib/ansible/modules',
+ module_utils='lib/ansible/module_utils',
+ ))
+
+ return ContentLayout(root,
+ paths,
+ plugin_paths=plugin_paths,
+ collection=None,
+ test_path='test',
+ results_path='test/results',
+ sanity_path='test/sanity',
+ sanity_messages=None,
+ integration_path='test/integration',
+ integration_targets_path='test/integration/targets',
+ integration_vars_path='test/integration/integration_config.yml',
+ integration_messages=None,
+ unit_path='test/units',
+ unit_module_path='test/units/modules',
+ unit_module_utils_path='test/units/module_utils',
+ unit_messages=None,
+ )
diff --git a/test/lib/ansible_test/_internal/provider/layout/collection.py b/test/lib/ansible_test/_internal/provider/layout/collection.py
new file mode 100644
index 00000000..ffad29f2
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/layout/collection.py
@@ -0,0 +1,123 @@
+"""Layout provider for Ansible collections."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from . import (
+ ContentLayout,
+ LayoutProvider,
+ CollectionDetail,
+ LayoutMessages,
+)
+
+
+class CollectionLayout(LayoutProvider):
+ """Layout provider for Ansible collections."""
+ __module_path = 'plugins/modules'
+ __unit_path = 'test/unit'
+
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ if os.path.basename(os.path.dirname(os.path.dirname(path))) == 'ansible_collections':
+ return True
+
+ return False
+
+ def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
+ """Create a Layout using the given root and paths."""
+ plugin_paths = dict((p, os.path.join('plugins', p)) for p in self.PLUGIN_TYPES)
+
+ collection_root = os.path.dirname(os.path.dirname(root))
+ collection_dir = os.path.relpath(root, collection_root)
+ collection_namespace, collection_name = collection_dir.split(os.path.sep)
+
+ collection_root = os.path.dirname(collection_root)
+
+ sanity_messages = LayoutMessages()
+ integration_messages = LayoutMessages()
+ unit_messages = LayoutMessages()
+
+ # these apply to all test commands
+ self.__check_test_path(paths, sanity_messages)
+ self.__check_test_path(paths, integration_messages)
+ self.__check_test_path(paths, unit_messages)
+
+ # these apply to specific test commands
+ integration_targets_path = self.__check_integration_path(paths, integration_messages)
+ self.__check_unit_path(paths, unit_messages)
+
+ return ContentLayout(root,
+ paths,
+ plugin_paths=plugin_paths,
+ collection=CollectionDetail(
+ name=collection_name,
+ namespace=collection_namespace,
+ root=collection_root,
+ ),
+ test_path='tests',
+ results_path='tests/output',
+ sanity_path='tests/sanity',
+ sanity_messages=sanity_messages,
+ integration_path='tests/integration',
+ integration_targets_path=integration_targets_path.rstrip(os.path.sep),
+ integration_vars_path='tests/integration/integration_config.yml',
+ integration_messages=integration_messages,
+ unit_path='tests/unit',
+ unit_module_path='tests/unit/plugins/modules',
+ unit_module_utils_path='tests/unit/plugins/module_utils',
+ unit_messages=unit_messages,
+ )
+
+ @staticmethod
+ def __check_test_path(paths, messages): # type: (t.List[str], LayoutMessages) -> None
+ modern_test_path = 'tests/'
+ modern_test_path_found = any(path.startswith(modern_test_path) for path in paths)
+ legacy_test_path = 'test/'
+ legacy_test_path_found = any(path.startswith(legacy_test_path) for path in paths)
+
+ if modern_test_path_found and legacy_test_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_test_path, modern_test_path))
+ elif legacy_test_path_found:
+ messages.warning.append('Ignoring tests in "%s" that should be in "%s".' % (legacy_test_path, modern_test_path))
+
+ @staticmethod
+ def __check_integration_path(paths, messages): # type: (t.List[str], LayoutMessages) -> str
+ modern_integration_path = 'roles/test/'
+ modern_integration_path_found = any(path.startswith(modern_integration_path) for path in paths)
+ legacy_integration_path = 'tests/integration/targets/'
+ legacy_integration_path_found = any(path.startswith(legacy_integration_path) for path in paths)
+
+ if modern_integration_path_found and legacy_integration_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_integration_path, modern_integration_path))
+ integration_targets_path = modern_integration_path
+ elif legacy_integration_path_found:
+ messages.info.append('Falling back to tests in "%s" because "%s" was not found.' % (legacy_integration_path, modern_integration_path))
+ integration_targets_path = legacy_integration_path
+ elif modern_integration_path_found:
+ messages.info.append('Loading tests from "%s".' % modern_integration_path)
+ integration_targets_path = modern_integration_path
+ else:
+ messages.error.append('Cannot run integration tests without "%s" or "%s".' % (modern_integration_path, legacy_integration_path))
+ integration_targets_path = modern_integration_path
+
+ return integration_targets_path
+
+ @staticmethod
+ def __check_unit_path(paths, messages): # type: (t.List[str], LayoutMessages) -> None
+ modern_unit_path = 'tests/unit/'
+ modern_unit_path_found = any(path.startswith(modern_unit_path) for path in paths)
+ legacy_unit_path = 'tests/units/' # test/units/ will be covered by the warnings for test/ vs tests/
+ legacy_unit_path_found = any(path.startswith(legacy_unit_path) for path in paths)
+
+ if modern_unit_path_found and legacy_unit_path_found:
+ messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_unit_path, modern_unit_path))
+ elif legacy_unit_path_found:
+ messages.warning.append('Rename "%s" to "%s" to run unit tests.' % (legacy_unit_path, modern_unit_path))
+ elif modern_unit_path_found:
+ pass # unit tests only run from one directory so no message is needed
+ else:
+ messages.error.append('Cannot run unit tests without "%s".' % modern_unit_path)
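
Note: CollectionLayout.is_content_root() only inspects the directory structure: the grandparent directory of the path must be named ansible_collections. A one-line sketch with a hypothetical collection checkout:

    import os

    path = '/tmp/work/ansible_collections/my_ns/my_col'  # hypothetical collection checkout
    print(os.path.basename(os.path.dirname(os.path.dirname(path))) == 'ansible_collections')  # -> True
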
diff --git a/test/lib/ansible_test/_internal/provider/source/__init__.py b/test/lib/ansible_test/_internal/provider/source/__init__.py
new file mode 100644
index 00000000..fab28b09
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/__init__.py
@@ -0,0 +1,18 @@
+"""Common code for source providers."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+
+from ... import types as t
+
+from .. import (
+ PathProvider,
+)
+
+
+class SourceProvider(PathProvider):
+ """Base class for source providers."""
+ @abc.abstractmethod
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
diff --git a/test/lib/ansible_test/_internal/provider/source/git.py b/test/lib/ansible_test/_internal/provider/source/git.py
new file mode 100644
index 00000000..0bf81a1c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/git.py
@@ -0,0 +1,72 @@
+"""Source provider for a content root managed by git version control."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from ...git import (
+ Git,
+)
+
+from ...encoding import (
+ to_bytes,
+)
+
+from ...util import (
+ SubprocessError,
+)
+
+from . import (
+ SourceProvider,
+)
+
+
+class GitSource(SourceProvider):
+ """Source provider for a content root managed by git version control."""
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return os.path.exists(os.path.join(path, '.git'))
+
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ paths = self.__get_paths(path)
+
+ try:
+ submodule_paths = Git(path).get_submodule_paths()
+ except SubprocessError:
+ if path == self.root:
+ raise
+
+ # older versions of git require submodule commands to be executed from the top level of the working tree
+ # git version 2.18.1 (centos8) does not have this restriction
+ # git version 1.8.3.1 (centos7) does
+ # fall back to using the top level directory of the working tree only when needed
+ # this avoids penalizing newer git versions with a potentially slower analysis due to additional submodules
+ rel_path = os.path.relpath(path, self.root) + os.path.sep
+
+ submodule_paths = Git(self.root).get_submodule_paths()
+ submodule_paths = [os.path.relpath(p, rel_path) for p in submodule_paths if p.startswith(rel_path)]
+
+ for submodule_path in submodule_paths:
+ paths.extend(os.path.join(submodule_path, p) for p in self.__get_paths(os.path.join(path, submodule_path)))
+
+ # git reports submodule directories as regular files
+ paths = [p for p in paths if p not in submodule_paths]
+
+ return paths
+
+ @staticmethod
+ def __get_paths(path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ git = Git(path)
+ paths = git.get_file_names(['--cached', '--others', '--exclude-standard'])
+ deleted_paths = git.get_file_names(['--deleted'])
+ paths = sorted(set(paths) - set(deleted_paths))
+
+ # directory symlinks are reported by git as regular files but they need to be treated as directories
+ paths = [path + os.path.sep if os.path.isdir(to_bytes(path)) else path for path in paths]
+
+ return paths
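
Note: the private __get_paths() helper combines tracked and untracked-but-not-ignored files, then removes deleted ones. A rough standalone equivalent using git ls-files directly (assuming, based on the flags passed above, that Git.get_file_names() wraps that command; run it inside a git working tree):

    import subprocess

    def ls_files(args):
        output = subprocess.check_output(['git', 'ls-files'] + args, text=True)
        return output.splitlines()

    present = ls_files(['--cached', '--others', '--exclude-standard'])
    deleted = ls_files(['--deleted'])

    print(sorted(set(present) - set(deleted)))
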
diff --git a/test/lib/ansible_test/_internal/provider/source/installed.py b/test/lib/ansible_test/_internal/provider/source/installed.py
new file mode 100644
index 00000000..d24a6e3d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/installed.py
@@ -0,0 +1,43 @@
+"""Source provider for content which has been installed."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from . import (
+ SourceProvider,
+)
+
+
+class InstalledSource(SourceProvider):
+ """Source provider for content which has been installed."""
+ sequence = 0 # disable automatic detection
+
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return False
+
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ paths = []
+
+ kill_extensions = (
+ '.pyc',
+ '.pyo',
+ )
+
+ for root, _dummy, file_names in os.walk(path):
+ rel_root = os.path.relpath(root, path)
+
+ if rel_root == '.':
+ rel_root = ''
+
+ paths.extend([os.path.join(rel_root, file_name) for file_name in file_names
+ if not os.path.splitext(file_name)[1] in kill_extensions])
+
+ # NOTE: directory symlinks are ignored as there should be no directory symlinks for an install
+
+ return paths
diff --git a/test/lib/ansible_test/_internal/provider/source/unversioned.py b/test/lib/ansible_test/_internal/provider/source/unversioned.py
new file mode 100644
index 00000000..09105789
--- /dev/null
+++ b/test/lib/ansible_test/_internal/provider/source/unversioned.py
@@ -0,0 +1,87 @@
+"""Fallback source provider when no other provider matches the content root."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ... import types as t
+
+from ...constants import (
+ TIMEOUT_PATH,
+)
+
+from ...encoding import (
+ to_bytes,
+)
+
+from . import (
+ SourceProvider,
+)
+
+
+class UnversionedSource(SourceProvider):
+ """Fallback source provider when no other provider matches the content root."""
+ sequence = 0 # disable automatic detection
+
+ @staticmethod
+ def is_content_root(path): # type: (str) -> bool
+ """Return True if the given path is a content root for this provider."""
+ return False
+
+ def get_paths(self, path): # type: (str) -> t.List[str]
+ """Return the list of available content paths under the given path."""
+ paths = []
+
+ kill_any_dir = (
+ '.idea',
+ '.pytest_cache',
+ '__pycache__',
+ 'ansible.egg-info',
+ 'ansible_base.egg-info',
+ )
+
+ kill_sub_dir = {
+ 'test': (
+ 'results',
+ 'cache',
+ 'output',
+ ),
+ 'tests': (
+ 'output',
+ ),
+ 'docs/docsite': (
+ '_build',
+ ),
+ }
+
+ kill_sub_file = {
+ '': (
+ TIMEOUT_PATH,
+ ),
+ }
+
+ kill_extensions = (
+ '.pyc',
+ '.pyo',
+ '.retry',
+ )
+
+ for root, dir_names, file_names in os.walk(path):
+ rel_root = os.path.relpath(root, path)
+
+ if rel_root == '.':
+ rel_root = ''
+
+ for kill in kill_any_dir + kill_sub_dir.get(rel_root, ()):
+ if kill in dir_names:
+ dir_names.remove(kill)
+
+ kill_files = kill_sub_file.get(rel_root, ())
+
+ paths.extend([os.path.join(rel_root, file_name) for file_name in file_names
+ if not os.path.splitext(file_name)[1] in kill_extensions and file_name not in kill_files])
+
+ # include directory symlinks since they will not be traversed and would otherwise go undetected
+            paths.extend([os.path.join(rel_root, dir_name) + os.path.sep for dir_name in dir_names if os.path.islink(to_bytes(os.path.join(root, dir_name)))])
+
+ return paths
diff --git a/test/lib/ansible_test/_internal/sanity/__init__.py b/test/lib/ansible_test/_internal/sanity/__init__.py
new file mode 100644
index 00000000..976bbb2f
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/__init__.py
@@ -0,0 +1,946 @@
+"""Execute Ansible sanity tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import abc
+import glob
+import os
+import re
+import collections
+
+from .. import types as t
+
+from ..io import (
+ read_json_file,
+)
+
+from ..util import (
+ ApplicationError,
+ SubprocessError,
+ display,
+ import_plugins,
+ load_plugins,
+ parse_to_list_of_dict,
+ ABC,
+ ANSIBLE_TEST_DATA_ROOT,
+ is_binary_file,
+ read_lines_without_comments,
+ get_available_python_versions,
+ find_python,
+ is_subdir,
+ paths_to_dirs,
+ get_ansible_version,
+ str_to_version,
+)
+
+from ..util_common import (
+ run_command,
+ intercept_command,
+ handle_layout_messages,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+)
+
+from ..target import (
+ walk_internal_targets,
+ walk_sanity_targets,
+ TestTarget,
+)
+
+from ..executor import (
+ get_changes_filter,
+ AllTargetsSkipped,
+ Delegate,
+ install_command_requirements,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..test import (
+ TestSuccess,
+ TestFailure,
+ TestSkipped,
+ TestMessage,
+ calculate_best_confidence,
+)
+
+from ..data import (
+ data_context,
+)
+
+COMMAND = 'sanity'
+SANITY_ROOT = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'sanity')
+
+
+def command_sanity(args):
+ """
+ :type args: SanityConfig
+ """
+ handle_layout_messages(data_context().content.sanity_messages)
+
+ changes = get_changes_filter(args)
+ require = args.require + changes
+ targets = SanityTargets.create(args.include, args.exclude, require)
+
+ if not targets.include:
+ raise AllTargetsSkipped()
+
+ if args.delegate:
+ raise Delegate(require=changes, exclude=args.exclude)
+
+ tests = sanity_get_tests()
+
+ if args.test:
+ tests = [target for target in tests if target.name in args.test]
+ else:
+ disabled = [target.name for target in tests if not target.enabled and not args.allow_disabled]
+ tests = [target for target in tests if target.enabled or args.allow_disabled]
+
+ if disabled:
+ display.warning('Skipping tests disabled by default without --allow-disabled: %s' % ', '.join(sorted(disabled)))
+
+ if args.skip_test:
+ tests = [target for target in tests if target.name not in args.skip_test]
+
+ total = 0
+ failed = []
+
+ for test in tests:
+ if args.list_tests:
+ display.info(test.name)
+ continue
+
+ available_versions = sorted(get_available_python_versions(SUPPORTED_PYTHON_VERSIONS).keys())
+
+ if args.python:
+ # specific version selected
+ versions = (args.python,)
+ elif isinstance(test, SanityMultipleVersion):
+ # try all supported versions for multi-version tests when a specific version has not been selected
+ versions = test.supported_python_versions
+ elif not test.supported_python_versions or args.python_version in test.supported_python_versions:
+ # the test works with any version or the version we're already running
+ versions = (args.python_version,)
+ else:
+ # available versions supported by the test
+ versions = tuple(sorted(set(available_versions) & set(test.supported_python_versions)))
+ # use the lowest available version supported by the test or the current version as a fallback (which will be skipped)
+ versions = versions[:1] or (args.python_version,)
+
+ for version in versions:
+ if isinstance(test, SanityMultipleVersion):
+ skip_version = version
+ else:
+ skip_version = None
+
+ options = ''
+
+ if test.supported_python_versions and version not in test.supported_python_versions:
+ display.warning("Skipping sanity test '%s' on unsupported Python %s." % (test.name, version))
+ result = SanitySkipped(test.name, skip_version)
+ elif not args.python and version not in available_versions:
+ display.warning("Skipping sanity test '%s' on Python %s due to missing interpreter." % (test.name, version))
+ result = SanitySkipped(test.name, skip_version)
+ else:
+ if test.supported_python_versions:
+ display.info("Running sanity test '%s' with Python %s" % (test.name, version))
+ else:
+ display.info("Running sanity test '%s'" % test.name)
+
+ if isinstance(test, SanityCodeSmellTest):
+ settings = test.load_processor(args)
+ elif isinstance(test, SanityMultipleVersion):
+ settings = test.load_processor(args, version)
+ elif isinstance(test, SanitySingleVersion):
+ settings = test.load_processor(args)
+ elif isinstance(test, SanityVersionNeutral):
+ settings = test.load_processor(args)
+ else:
+ raise Exception('Unsupported test type: %s' % type(test))
+
+ all_targets = targets.targets
+
+ if test.all_targets:
+ usable_targets = targets.targets
+ elif test.no_targets:
+ usable_targets = tuple()
+ else:
+ usable_targets = targets.include
+
+ all_targets = SanityTargets.filter_and_inject_targets(test, all_targets)
+ usable_targets = SanityTargets.filter_and_inject_targets(test, usable_targets)
+
+ usable_targets = sorted(test.filter_targets(list(usable_targets)))
+ usable_targets = settings.filter_skipped_targets(usable_targets)
+ sanity_targets = SanityTargets(tuple(all_targets), tuple(usable_targets))
+
+ if usable_targets or test.no_targets:
+ install_command_requirements(args, version, context=test.name, enable_pyyaml_check=True)
+
+ if isinstance(test, SanityCodeSmellTest):
+ result = test.test(args, sanity_targets, version)
+ elif isinstance(test, SanityMultipleVersion):
+ result = test.test(args, sanity_targets, version)
+ options = ' --python %s' % version
+ elif isinstance(test, SanitySingleVersion):
+ result = test.test(args, sanity_targets, version)
+ elif isinstance(test, SanityVersionNeutral):
+ result = test.test(args, sanity_targets)
+ else:
+ raise Exception('Unsupported test type: %s' % type(test))
+ else:
+ result = SanitySkipped(test.name, skip_version)
+
+ result.write(args)
+
+ total += 1
+
+ if isinstance(result, SanityFailure):
+ failed.append(result.test + options)
+
+ if failed:
+ message = 'The %d sanity test(s) listed below (out of %d) failed. See error output above for details.\n%s' % (
+ len(failed), total, '\n'.join(failed))
+
+ if args.failure_ok:
+ display.error(message)
+ else:
+ raise ApplicationError(message)
+
+
+def collect_code_smell_tests(): # type: () -> t.Tuple[SanityFunc, ...]
+ """Return a tuple of available code smell sanity tests."""
+ paths = glob.glob(os.path.join(SANITY_ROOT, 'code-smell', '*.py'))
+
+ if data_context().content.is_ansible:
+ # include Ansible specific code-smell tests which are not configured to be skipped
+ ansible_code_smell_root = os.path.join(data_context().content.root, 'test', 'sanity', 'code-smell')
+ skip_tests = read_lines_without_comments(os.path.join(ansible_code_smell_root, 'skip.txt'), remove_blank_lines=True, optional=True)
+ paths.extend(path for path in glob.glob(os.path.join(ansible_code_smell_root, '*.py')) if os.path.basename(path) not in skip_tests)
+
+ paths = sorted(p for p in paths if os.access(p, os.X_OK) and os.path.isfile(p))
+ tests = tuple(SanityCodeSmellTest(p) for p in paths)
+
+ return tests
+
+
+def sanity_get_tests():
+ """
+ :rtype: tuple[SanityFunc]
+ """
+ return SANITY_TESTS
+
+
+class SanityIgnoreParser:
+ """Parser for the consolidated sanity test ignore file."""
+ NO_CODE = '_'
+
+ def __init__(self, args): # type: (SanityConfig) -> None
+ if data_context().content.collection:
+ ansible_version = '%s.%s' % tuple(get_ansible_version().split('.')[:2])
+
+ ansible_label = 'Ansible %s' % ansible_version
+ file_name = 'ignore-%s.txt' % ansible_version
+ else:
+ ansible_label = 'Ansible'
+ file_name = 'ignore.txt'
+
+ self.args = args
+ self.relative_path = os.path.join(data_context().content.sanity_path, file_name)
+ self.path = os.path.join(data_context().content.root, self.relative_path)
+ self.ignores = collections.defaultdict(lambda: collections.defaultdict(dict)) # type: t.Dict[str, t.Dict[str, t.Dict[str, int]]]
+ self.skips = collections.defaultdict(lambda: collections.defaultdict(int)) # type: t.Dict[str, t.Dict[str, int]]
+ self.parse_errors = [] # type: t.List[t.Tuple[int, int, str]]
+ self.file_not_found_errors = [] # type: t.List[t.Tuple[int, str]]
+
+ lines = read_lines_without_comments(self.path, optional=True)
+ targets = SanityTargets.get_targets()
+ paths = set(target.path for target in targets)
+ tests_by_name = {} # type: t.Dict[str, SanityTest]
+ versioned_test_names = set() # type: t.Set[str]
+ unversioned_test_names = {} # type: t.Dict[str, str]
+ directories = paths_to_dirs(list(paths))
+ paths_by_test = {} # type: t.Dict[str, t.Set[str]]
+
+ display.info('Read %d sanity test ignore line(s) for %s from: %s' % (len(lines), ansible_label, self.relative_path), verbosity=1)
+
+ for test in sanity_get_tests():
+ test_targets = SanityTargets.filter_and_inject_targets(test, targets)
+
+ paths_by_test[test.name] = set(target.path for target in test.filter_targets(test_targets))
+
+ if isinstance(test, SanityMultipleVersion):
+ versioned_test_names.add(test.name)
+ tests_by_name.update(dict(('%s-%s' % (test.name, python_version), test) for python_version in test.supported_python_versions))
+ else:
+ unversioned_test_names.update(dict(('%s-%s' % (test.name, python_version), test.name) for python_version in SUPPORTED_PYTHON_VERSIONS))
+ tests_by_name[test.name] = test
+
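+        # Each entry line has the form "<path> <test>[-<python_version>][:<error_code>|!<command>]".
+        # Illustrative examples (hypothetical paths):
+        #   lib/ansible/modules/foo.py pep8:E501
+        #   lib/ansible/modules/foo.py import-2.7!skip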
+ for line_no, line in enumerate(lines, start=1):
+ if not line:
+ self.parse_errors.append((line_no, 1, "Line cannot be empty or contain only a comment"))
+ continue
+
+ parts = line.split(' ')
+ path = parts[0]
+ codes = parts[1:]
+
+ if not path:
+ self.parse_errors.append((line_no, 1, "Line cannot start with a space"))
+ continue
+
+ if path.endswith(os.path.sep):
+ if path not in directories:
+ self.file_not_found_errors.append((line_no, path))
+ continue
+ else:
+ if path not in paths:
+ self.file_not_found_errors.append((line_no, path))
+ continue
+
+ if not codes:
+ self.parse_errors.append((line_no, len(path), "Error code required after path"))
+ continue
+
+ code = codes[0]
+
+ if not code:
+ self.parse_errors.append((line_no, len(path) + 1, "Error code after path cannot be empty"))
+ continue
+
+ if len(codes) > 1:
+ self.parse_errors.append((line_no, len(path) + len(code) + 2, "Error code cannot contain spaces"))
+ continue
+
+ parts = code.split('!')
+ code = parts[0]
+ commands = parts[1:]
+
+ parts = code.split(':')
+ test_name = parts[0]
+ error_codes = parts[1:]
+
+ test = tests_by_name.get(test_name)
+
+ if not test:
+ unversioned_name = unversioned_test_names.get(test_name)
+
+ if unversioned_name:
+ self.parse_errors.append((line_no, len(path) + len(unversioned_name) + 2, "Sanity test '%s' cannot use a Python version like '%s'" % (
+ unversioned_name, test_name)))
+ elif test_name in versioned_test_names:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires a Python version like '%s-%s'" % (
+ test_name, test_name, args.python_version)))
+ else:
+ self.parse_errors.append((line_no, len(path) + 2, "Sanity test '%s' does not exist" % test_name))
+
+ continue
+
+ if path.endswith(os.path.sep) and not test.include_directories:
+ self.parse_errors.append((line_no, 1, "Sanity test '%s' does not support directory paths" % test_name))
+ continue
+
+ if path not in paths_by_test[test.name] and not test.no_targets:
+ self.parse_errors.append((line_no, 1, "Sanity test '%s' does not test path '%s'" % (test_name, path)))
+ continue
+
+ if commands and error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Error code cannot contain both '!' and ':' characters"))
+ continue
+
+ if commands:
+ command = commands[0]
+
+ if len(commands) > 1:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + len(command) + 3, "Error code cannot contain multiple '!' characters"))
+ continue
+
+ if command == 'skip':
+ if not test.can_skip:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' cannot be skipped" % test_name))
+ continue
+
+ existing_line_no = self.skips.get(test_name, {}).get(path)
+
+ if existing_line_no:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' skip for path '%s' first found on line %d" % (test_name, path, existing_line_no)))
+ continue
+
+ self.skips[test_name][path] = line_no
+ continue
+
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Command '!%s' not recognized" % command))
+ continue
+
+ if not test.can_ignore:
+ self.parse_errors.append((line_no, len(path) + 1, "Sanity test '%s' cannot be ignored" % test_name))
+ continue
+
+ if test.error_code:
+ if not error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 1, "Sanity test '%s' requires an error code" % test_name))
+ continue
+
+ error_code = error_codes[0]
+
+ if len(error_codes) > 1:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + len(error_code) + 3, "Error code cannot contain multiple ':' characters"))
+ continue
+
+ if error_code in test.optional_error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 3, "Optional error code '%s' cannot be ignored" % (
+ error_code)))
+ continue
+ else:
+ if error_codes:
+ self.parse_errors.append((line_no, len(path) + len(test_name) + 2, "Sanity test '%s' does not support error codes" % test_name))
+ continue
+
+ error_code = self.NO_CODE
+
+ existing = self.ignores.get(test_name, {}).get(path, {}).get(error_code)
+
+ if existing:
+ if test.error_code:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for error code '%s' for path '%s' first found on line %d" % (
+ test_name, error_code, path, existing)))
+ else:
+ self.parse_errors.append((line_no, 1, "Duplicate '%s' ignore for path '%s' first found on line %d" % (
+ test_name, path, existing)))
+
+ continue
+
+ self.ignores[test_name][path][error_code] = line_no
+
+ @staticmethod
+ def load(args): # type: (SanityConfig) -> SanityIgnoreParser
+        """Return the current SanityIgnoreParser instance, initializing it if needed."""
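+        # the parsed ignore file is cached as a class attribute so it is read and validated only once per run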
+ try:
+ return SanityIgnoreParser.instance
+ except AttributeError:
+ pass
+
+ SanityIgnoreParser.instance = SanityIgnoreParser(args)
+ return SanityIgnoreParser.instance
+
+
+class SanityIgnoreProcessor:
+ """Processor for sanity test ignores for a single run of one sanity test."""
+ def __init__(self,
+ args, # type: SanityConfig
+ test, # type: SanityTest
+ python_version, # type: t.Optional[str]
+ ): # type: (...) -> None
+ name = test.name
+ code = test.error_code
+
+ if python_version:
+ full_name = '%s-%s' % (name, python_version)
+ else:
+ full_name = name
+
+ self.args = args
+ self.test = test
+ self.code = code
+ self.parser = SanityIgnoreParser.load(args)
+ self.ignore_entries = self.parser.ignores.get(full_name, {})
+ self.skip_entries = self.parser.skips.get(full_name, {})
+ self.used_line_numbers = set() # type: t.Set[int]
+
+ def filter_skipped_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given targets, with any skipped paths filtered out."""
+ return sorted(target for target in targets if target.path not in self.skip_entries)
+
+ def process_errors(self, errors, paths): # type: (t.List[SanityMessage], t.List[str]) -> t.List[SanityMessage]
+ """Return the given errors filtered for ignores and with any settings related errors included."""
+ errors = self.filter_messages(errors)
+ errors.extend(self.get_errors(paths))
+
+ errors = sorted(set(errors))
+
+ return errors
+
+ def filter_messages(self, messages): # type: (t.List[SanityMessage]) -> t.List[SanityMessage]
+ """Return a filtered list of the given messages using the entries that have been loaded."""
+ filtered = []
+
+ for message in messages:
+ if message.code in self.test.optional_error_codes and not self.args.enable_optional_errors:
+ continue
+
+ path_entry = self.ignore_entries.get(message.path)
+
+ if path_entry:
+ code = message.code if self.code else SanityIgnoreParser.NO_CODE
+ line_no = path_entry.get(code)
+
+ if line_no:
+ self.used_line_numbers.add(line_no)
+ continue
+
+ filtered.append(message)
+
+ return filtered
+
+ def get_errors(self, paths): # type: (t.List[str]) -> t.List[SanityMessage]
+        """Return error messages for ignore entries which did not match any reported error for the given paths."""
+ messages = []
+
+ # unused errors
+
+ unused = [] # type: t.List[t.Tuple[int, str, str]]
+
+ if self.test.no_targets or self.test.all_targets:
+ # tests which do not accept a target list, or which use all targets, always return all possible errors, so all ignores can be checked
+ targets = SanityTargets.get_targets()
+ test_targets = SanityTargets.filter_and_inject_targets(self.test, targets)
+ paths = [target.path for target in test_targets]
+
+ for path in paths:
+ path_entry = self.ignore_entries.get(path)
+
+ if not path_entry:
+ continue
+
+ unused.extend((line_no, path, code) for code, line_no in path_entry.items() if line_no not in self.used_line_numbers)
+
+ messages.extend(SanityMessage(
+ code=self.code,
+ message="Ignoring '%s' on '%s' is unnecessary" % (code, path) if self.code else "Ignoring '%s' is unnecessary" % path,
+ path=self.parser.relative_path,
+ line=line,
+ column=1,
+ confidence=calculate_best_confidence(((self.parser.path, line), (path, 0)), self.args.metadata) if self.args.metadata.changes else None,
+ ) for line, path, code in unused)
+
+ return messages
+
+
+class SanitySuccess(TestSuccess):
+ """Sanity test success."""
+ def __init__(self, test, python_version=None):
+ """
+ :type test: str
+ :type python_version: str
+ """
+ super(SanitySuccess, self).__init__(COMMAND, test, python_version)
+
+
+class SanitySkipped(TestSkipped):
+ """Sanity test skipped."""
+ def __init__(self, test, python_version=None):
+ """
+ :type test: str
+ :type python_version: str
+ """
+ super(SanitySkipped, self).__init__(COMMAND, test, python_version)
+
+
+class SanityFailure(TestFailure):
+ """Sanity test failure."""
+ def __init__(self, test, python_version=None, messages=None, summary=None):
+ """
+ :type test: str
+ :type python_version: str
+ :type messages: list[SanityMessage]
+ :type summary: unicode
+ """
+ super(SanityFailure, self).__init__(COMMAND, test, python_version, messages, summary)
+
+
+class SanityMessage(TestMessage):
+ """Single sanity test message for one file."""
+
+
+class SanityTargets:
+ """Sanity test target information."""
+    def __init__(self, targets, include):  # type: (t.Tuple[TestTarget, ...], t.Tuple[TestTarget, ...]) -> None
+ self.targets = targets
+ self.include = include
+
+ @staticmethod
+ def create(include, exclude, require): # type: (t.List[str], t.List[str], t.List[str]) -> SanityTargets
+ """Create a SanityTargets instance from the given include, exclude and require lists."""
+ _targets = SanityTargets.get_targets()
+ _include = walk_internal_targets(_targets, include, exclude, require)
+ return SanityTargets(_targets, _include)
+
+ @staticmethod
+ def filter_and_inject_targets(test, targets): # type: (SanityTest, t.Iterable[TestTarget]) -> t.List[TestTarget]
+ """Filter and inject targets based on test requirements and the given target list."""
+ test_targets = list(targets)
+
+ if not test.include_symlinks:
+ # remove all symlinks unless supported by the test
+ test_targets = [target for target in test_targets if not target.symlink]
+
+ if not test.include_directories or not test.include_symlinks:
+ # exclude symlinked directories unless supported by the test
+ test_targets = [target for target in test_targets if not target.path.endswith(os.path.sep)]
+
+ if test.include_directories:
+ # include directories containing any of the included files
+ test_targets += tuple(TestTarget(path, None, None, '') for path in paths_to_dirs([target.path for target in test_targets]))
+
+ if not test.include_symlinks:
+ # remove all directory symlinks unless supported by the test
+ test_targets = [target for target in test_targets if not target.symlink]
+
+ return test_targets
+
+ @staticmethod
+ def get_targets(): # type: () -> t.Tuple[TestTarget, ...]
+ """Return a tuple of sanity test targets. Uses a cached version when available."""
+ try:
+ return SanityTargets.get_targets.targets
+ except AttributeError:
+ SanityTargets.get_targets.targets = tuple(sorted(walk_sanity_targets()))
+
+ return SanityTargets.get_targets.targets
+
+
+class SanityTest(ABC):
+ """Sanity test base class."""
+ __metaclass__ = abc.ABCMeta
+
+ ansible_only = False
+
+ def __init__(self, name):
+ self.name = name
+ self.enabled = True
+
+ # Optional error codes represent errors which spontaneously occur without changes to the content under test, such as those based on the current date.
+ # Because these errors can be unpredictable they behave differently than normal error codes:
+ # * They are not reported by default. The `--enable-optional-errors` option must be used to display these errors.
+ # * They cannot be ignored. This is done to maintain the integrity of the ignore system.
+ self.optional_error_codes = set()
+
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return None
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return True
+
+ @property
+ def can_skip(self): # type: () -> bool
+ """True if the test supports skip entries."""
+ return not self.all_targets and not self.no_targets
+
+ @property
+ def all_targets(self): # type: () -> bool
+ """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return False
+
+ @property
+ def include_directories(self): # type: () -> bool
+ """True if the test targets should include directories."""
+ return False
+
+ @property
+ def include_symlinks(self): # type: () -> bool
+ """True if the test targets should include symlinks."""
+ return False
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
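+        # limited to Python 3 by default; SanityMultipleVersion overrides this to cover all supported versions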
+ return tuple(python_version for python_version in SUPPORTED_PYTHON_VERSIONS if python_version.startswith('3.'))
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget] # pylint: disable=unused-argument
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ if self.no_targets:
+ return []
+
+ raise NotImplementedError('Sanity test "%s" must implement "filter_targets" or set "no_targets" to True.' % self.name)
+
+
+class SanityCodeSmellTest(SanityTest):
+ """Sanity test script."""
+ def __init__(self, path):
+ name = os.path.splitext(os.path.basename(path))[0]
+ config_path = os.path.splitext(path)[0] + '.json'
+
+ super(SanityCodeSmellTest, self).__init__(name)
+
+ self.path = path
+ self.config_path = config_path if os.path.exists(config_path) else None
+ self.config = None
+
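+        # A code-smell script may ship a companion JSON config. A minimal illustrative example:
+        #   {
+        #       "output": "path-line-column-message",
+        #       "extensions": [".py"],
+        #       "prefixes": ["lib/ansible/"]
+        #   }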
+ if self.config_path:
+ self.config = read_json_file(self.config_path)
+
+ if self.config:
+ self.enabled = not self.config.get('disabled')
+
+ self.output = self.config.get('output') # type: t.Optional[str]
+ self.extensions = self.config.get('extensions') # type: t.List[str]
+ self.prefixes = self.config.get('prefixes') # type: t.List[str]
+ self.files = self.config.get('files') # type: t.List[str]
+ self.text = self.config.get('text') # type: t.Optional[bool]
+ self.ignore_self = self.config.get('ignore_self') # type: bool
+ self.intercept = self.config.get('intercept') # type: bool
+ self.minimum_python_version = self.config.get('minimum_python_version') # type: t.Optional[str]
+
+ self.__all_targets = self.config.get('all_targets') # type: bool
+ self.__no_targets = self.config.get('no_targets') # type: bool
+ self.__include_directories = self.config.get('include_directories') # type: bool
+ self.__include_symlinks = self.config.get('include_symlinks') # type: bool
+ else:
+ self.output = None
+ self.extensions = []
+ self.prefixes = []
+ self.files = []
+ self.text = None # type: t.Optional[bool]
+ self.ignore_self = False
+ self.intercept = False
+ self.minimum_python_version = None # type: t.Optional[str]
+
+ self.__all_targets = False
+ self.__no_targets = True
+ self.__include_directories = False
+ self.__include_symlinks = False
+
+ if self.no_targets:
+ mutually_exclusive = (
+ 'extensions',
+ 'prefixes',
+ 'files',
+ 'text',
+ 'ignore_self',
+ 'all_targets',
+ 'include_directories',
+ 'include_symlinks',
+ )
+
+ problems = sorted(name for name in mutually_exclusive if getattr(self, name))
+
+ if problems:
+ raise ApplicationError('Sanity test "%s" option "no_targets" is mutually exclusive with options: %s' % (self.name, ', '.join(problems)))
+
+ @property
+ def all_targets(self): # type: () -> bool
+ """True if test targets will not be filtered using includes, excludes, requires or changes. Mutually exclusive with no_targets."""
+ return self.__all_targets
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return self.__no_targets
+
+ @property
+ def include_directories(self): # type: () -> bool
+ """True if the test targets should include directories."""
+ return self.__include_directories
+
+ @property
+ def include_symlinks(self): # type: () -> bool
+ """True if the test targets should include symlinks."""
+ return self.__include_symlinks
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ if self.no_targets:
+ return []
+
+ if self.text is not None:
+ if self.text:
+ targets = [target for target in targets if not is_binary_file(target.path)]
+ else:
+ targets = [target for target in targets if is_binary_file(target.path)]
+
+ if self.extensions:
+ targets = [target for target in targets if os.path.splitext(target.path)[1] in self.extensions
+ or (is_subdir(target.path, 'bin') and '.py' in self.extensions)]
+
+ if self.prefixes:
+ targets = [target for target in targets if any(target.path.startswith(pre) for pre in self.prefixes)]
+
+ if self.files:
+ targets = [target for target in targets if os.path.basename(target.path) in self.files]
+
+ if self.ignore_self and data_context().content.is_ansible:
+ relative_self_path = os.path.relpath(self.path, data_context().content.root)
+ targets = [target for target in targets if target.path != relative_self_path]
+
+ return targets
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ if self.minimum_python_version:
+ if str_to_version(python_version) < str_to_version(self.minimum_python_version):
+ display.warning("Skipping sanity test '%s' on unsupported Python %s; requires Python %s or newer." % (
+ self.name, python_version, self.minimum_python_version))
+ return SanitySkipped(self.name, 'Test requires Python %s or newer' % (self.minimum_python_version, ))
+
+ cmd = [find_python(python_version), self.path]
+
+ env = ansible_environment(args, color=False)
+
+ pattern = None
+ data = None
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if self.config:
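+            # the configured output style selects the regex used to parse the script's stdout into messages,
+            # e.g. 'path-line-column-message' matches lines such as "lib/ansible/foo.py:1:1: message" (illustrative)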
+ if self.output == 'path-line-column-message':
+ pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+ elif self.output == 'path-message':
+ pattern = '^(?P<path>[^:]*): (?P<message>.*)$'
+            else:
+                raise ApplicationError('Unsupported output type: %s' % self.output)
+
+ if not self.no_targets:
+ data = '\n'.join(paths)
+
+ if data:
+ display.info(data, verbosity=4)
+
+ try:
+ if self.intercept:
+ stdout, stderr = intercept_command(args, cmd, target_name='sanity.%s' % self.name, data=data, env=env, capture=True, disable_coverage=True)
+ else:
+ stdout, stderr = run_command(args, cmd, data=data, env=env, capture=True)
+
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if stdout and not stderr:
+ if pattern:
+ matches = parse_to_list_of_dict(pattern, stdout)
+
+ messages = [SanityMessage(
+ message=m['message'],
+ path=m['path'],
+ line=int(m.get('line', 0)),
+ column=int(m.get('column', 0)),
+ ) for m in matches]
+
+ messages = settings.process_errors(messages, paths)
+
+ if not messages:
+ return SanitySuccess(self.name)
+
+ return SanityFailure(self.name, messages=messages)
+
+ if stderr or status:
+ summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ return SanityFailure(self.name, summary=summary)
+
+ messages = settings.process_errors([], paths)
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
+
+ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+
+class SanityFunc(SanityTest):
+ """Base class for sanity test plugins."""
+ def __init__(self):
+ name = self.__class__.__name__
+ name = re.sub(r'Test$', '', name) # drop Test suffix
+ name = re.sub(r'(.)([A-Z][a-z]+)', r'\1-\2', name).lower() # use dashes instead of capitalization
+
+ super(SanityFunc, self).__init__(name)
+
+
+class SanityVersionNeutral(SanityFunc):
+    """Base class for sanity test plugins which are independent of the python version being used."""
+ @abc.abstractmethod
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+
+ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return None
+
+
+class SanitySingleVersion(SanityFunc):
+ """Base class for sanity test plugins which should run on a single python version."""
+ @abc.abstractmethod
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+
+ def load_processor(self, args): # type: (SanityConfig) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, None)
+
+
+class SanityMultipleVersion(SanityFunc):
+ """Base class for sanity test plugins which should run on multiple python versions."""
+ @abc.abstractmethod
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+
+ def load_processor(self, args, python_version): # type: (SanityConfig, str) -> SanityIgnoreProcessor
+ """Load the ignore processor for this sanity test."""
+ return SanityIgnoreProcessor(args, self, python_version)
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ return SUPPORTED_PYTHON_VERSIONS
+
+
+SANITY_TESTS = (
+)
+
+
+def sanity_init():
+ """Initialize full sanity test list (includes code-smell scripts determined at runtime)."""
+ import_plugins('sanity')
+ sanity_plugins = {} # type: t.Dict[str, t.Type[SanityFunc]]
+ load_plugins(SanityFunc, sanity_plugins)
+ sanity_tests = tuple([plugin() for plugin in sanity_plugins.values() if data_context().content.is_ansible or not plugin.ansible_only])
+ global SANITY_TESTS # pylint: disable=locally-disabled, global-statement
+ SANITY_TESTS = tuple(sorted(sanity_tests + collect_code_smell_tests(), key=lambda k: k.name))
diff --git a/test/lib/ansible_test/_internal/sanity/ansible_doc.py b/test/lib/ansible_test/_internal/sanity/ansible_doc.py
new file mode 100644
index 00000000..c6b997cf
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/ansible_doc.py
@@ -0,0 +1,144 @@
+"""Sanity test for ansible-doc."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import os
+import re
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityFailure,
+ SanitySuccess,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ is_subdir,
+)
+
+from ..util_common import (
+ intercept_command,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..coverage_util import (
+ coverage_context,
+)
+
+
+class AnsibleDocTest(SanitySingleVersion):
+ """Sanity test for ansible-doc."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ # This should use documentable plugins from constants instead
+ unsupported_plugin_types = set([
+ # not supported by ansible-doc
+ 'action',
+ 'doc_fragments',
+ 'filter',
+ 'module_utils',
+ 'terminal',
+ 'test',
+ ])
+
+ plugin_paths = [plugin_path for plugin_type, plugin_path in data_context().content.plugin_paths.items() if plugin_type not in unsupported_plugin_types]
+
+ return [target for target in targets
+ if os.path.splitext(target.path)[1] == '.py'
+ and os.path.basename(target.path) != '__init__.py'
+ and any(is_subdir(target.path, path) for path in plugin_paths)
+ ]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ doc_targets = collections.defaultdict(list)
+ target_paths = collections.defaultdict(dict)
+
+ remap_types = dict(
+ modules='module',
+ )
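+        # ansible-doc expects the singular plugin type name 'module' rather than the 'modules' directory name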
+
+ for plugin_type, plugin_path in data_context().content.plugin_paths.items():
+ plugin_type = remap_types.get(plugin_type, plugin_type)
+
+ for plugin_file_path in [target.name for target in targets.include if is_subdir(target.path, plugin_path)]:
+ plugin_name = os.path.splitext(os.path.basename(plugin_file_path))[0]
+
+ if plugin_name.startswith('_'):
+ plugin_name = plugin_name[1:]
+
+ doc_targets[plugin_type].append(data_context().content.prefix + plugin_name)
+ target_paths[plugin_type][data_context().content.prefix + plugin_name] = plugin_file_path
+
+ env = ansible_environment(args, color=False)
+ error_messages = []
+
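+        # each plugin type is documented twice: once with the default text output and once with --json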
+ for doc_type in sorted(doc_targets):
+ for format_option in [None, '--json']:
+ cmd = ['ansible-doc', '-t', doc_type]
+ if format_option is not None:
+ cmd.append(format_option)
+ cmd.extend(sorted(doc_targets[doc_type]))
+
+ try:
+ with coverage_context(args):
+ stdout, stderr = intercept_command(args, cmd, target_name='ansible-doc', env=env, capture=True, python_version=python_version)
+
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if status:
+ summary = u'%s' % SubprocessError(cmd=cmd, status=status, stderr=stderr)
+ return SanityFailure(self.name, summary=summary)
+
+ if stdout:
+ display.info(stdout.strip(), verbosity=3)
+
+ if stderr:
+ # ignore removed module/plugin warnings
+ stderr = re.sub(r'\[WARNING\]: [^ ]+ [^ ]+ has been removed\n', '', stderr).strip()
+
+ if stderr:
+ summary = u'Output on stderr from ansible-doc is considered an error.\n\n%s' % SubprocessError(cmd, stderr=stderr)
+ return SanityFailure(self.name, summary=summary)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ error_messages = settings.process_errors(error_messages, paths)
+
+ if error_messages:
+ return SanityFailure(self.name, messages=error_messages)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/bin_symlinks.py b/test/lib/ansible_test/_internal/sanity/bin_symlinks.py
new file mode 100644
index 00000000..bd0ba58e
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/bin_symlinks.py
@@ -0,0 +1,110 @@
+"""Sanity test for symlinks in the bin directory."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..payload import (
+ ANSIBLE_BIN_SYMLINK_MAP,
+ __file__ as symlink_map_full_path,
+)
+
+from ..util import (
+ ANSIBLE_BIN_PATH,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+
+class BinSymlinksTest(SanityVersionNeutral):
+ """Sanity test for symlinks in the bin directory."""
+ ansible_only = True
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ # noinspection PyUnusedLocal
+ def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ bin_root = ANSIBLE_BIN_PATH
+ bin_names = os.listdir(bin_root)
+ bin_paths = sorted(os.path.join(bin_root, path) for path in bin_names)
+
+ injector_root = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector')
+ injector_names = os.listdir(injector_root)
+
+ errors = [] # type: t.List[t.Tuple[str, str]]
+
+ symlink_map_path = os.path.relpath(symlink_map_full_path, data_context().content.root)
+
+ for bin_path in bin_paths:
+ if not os.path.islink(bin_path):
+ errors.append((bin_path, 'not a symbolic link'))
+ continue
+
+ dest = os.readlink(bin_path)
+
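+            # os.path.exists() follows the symlink, so a False result here means the link is dangling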
+ if not os.path.exists(bin_path):
+ errors.append((bin_path, 'points to non-existent path "%s"' % dest))
+ continue
+
+ if not os.path.isfile(bin_path):
+ errors.append((bin_path, 'points to non-file "%s"' % dest))
+ continue
+
+ map_dest = ANSIBLE_BIN_SYMLINK_MAP.get(os.path.basename(bin_path))
+
+ if not map_dest:
+ errors.append((bin_path, 'missing from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % symlink_map_path))
+ continue
+
+ if dest != map_dest:
+ errors.append((bin_path, 'points to "%s" instead of "%s" from ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, map_dest, symlink_map_path)))
+ continue
+
+ if not os.access(bin_path, os.X_OK):
+ errors.append((bin_path, 'points to non-executable file "%s"' % dest))
+ continue
+
+ for bin_name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
+ if bin_name not in bin_names:
+ bin_path = os.path.join(bin_root, bin_name)
+ errors.append((bin_path, 'missing symlink to "%s" defined in ANSIBLE_BIN_SYMLINK_MAP in file "%s"' % (dest, symlink_map_path)))
+
+ if bin_name not in injector_names:
+ injector_path = os.path.join(injector_root, bin_name)
+ errors.append((injector_path, 'missing symlink to "python.py"'))
+
+ messages = [SanityMessage(message=message, path=os.path.relpath(path, data_context().content.root), confidence=100) for path, message in errors]
+
+ if errors:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/compile.py b/test/lib/ansible_test/_internal/sanity/compile.py
new file mode 100644
index 00000000..5a517272
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/compile.py
@@ -0,0 +1,92 @@
+"""Sanity test for proper python syntax."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityMultipleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ find_python,
+ parse_to_list_of_dict,
+ is_subdir,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class CompileTest(SanityMultipleVersion):
+ """Sanity test for proper python syntax."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ settings = self.load_processor(args, python_version)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [find_python(python_version), os.path.join(SANITY_ROOT, 'compile', 'compile.py')]
+
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ try:
+ stdout, stderr = run_command(args, cmd, data=data, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name, python_version=python_version)
+
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stdout)
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=r['path'].replace('./', ''),
+ line=int(r['line']),
+ column=int(r['column']),
+ ) for r in results]
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results, python_version=python_version)
+
+ return SanitySuccess(self.name, python_version=python_version)
diff --git a/test/lib/ansible_test/_internal/sanity/ignores.py b/test/lib/ansible_test/_internal/sanity/ignores.py
new file mode 100644
index 00000000..8b6df50c
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/ignores.py
@@ -0,0 +1,89 @@
+"""Sanity test for the sanity ignore file."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..sanity import (
+ SanityFailure,
+ SanityIgnoreParser,
+ SanityVersionNeutral,
+ SanitySuccess,
+ SanityMessage,
+)
+
+from ..test import (
+ calculate_confidence,
+ calculate_best_confidence,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class IgnoresTest(SanityVersionNeutral):
+ """Sanity test for sanity test ignore entries."""
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ # noinspection PyUnusedLocal
+ def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ sanity_ignore = SanityIgnoreParser.load(args)
+
+ messages = []
+
+ # parse errors
+
+ messages.extend(SanityMessage(
+ message=message,
+ path=sanity_ignore.relative_path,
+ line=line,
+ column=column,
+ confidence=calculate_confidence(sanity_ignore.path, line, args.metadata) if args.metadata.changes else None,
+ ) for line, column, message in sanity_ignore.parse_errors)
+
+ # file not found errors
+
+ messages.extend(SanityMessage(
+ message="%s '%s' does not exist" % ("Directory" if path.endswith(os.path.sep) else "File", path),
+ path=sanity_ignore.relative_path,
+ line=line,
+ column=1,
+ confidence=calculate_best_confidence(((sanity_ignore.path, line), (path, 0)), args.metadata) if args.metadata.changes else None,
+ ) for line, path in sanity_ignore.file_not_found_errors)
+
+ # conflicting ignores and skips
+
+ for test_name, ignores in sanity_ignore.ignores.items():
+ for ignore_path, ignore_entry in ignores.items():
+ skip_line_no = sanity_ignore.skips.get(test_name, {}).get(ignore_path)
+
+ if not skip_line_no:
+ continue
+
+ for ignore_line_no in ignore_entry.values():
+ messages.append(SanityMessage(
+ message="Ignoring '%s' is unnecessary due to skip entry on line %d" % (ignore_path, skip_line_no),
+ path=sanity_ignore.relative_path,
+ line=ignore_line_no,
+ column=1,
+ confidence=calculate_confidence(sanity_ignore.path, ignore_line_no, args.metadata) if args.metadata.changes else None,
+ ))
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/import.py b/test/lib/ansible_test/_internal/sanity/import.py
new file mode 100644
index 00000000..7d4776ae
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/import.py
@@ -0,0 +1,184 @@
+"""Sanity test for proper import exception handling."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityMultipleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ remove_tree,
+ display,
+ parse_to_list_of_dict,
+ is_subdir,
+ generate_pip_command,
+ find_python,
+)
+
+from ..util_common import (
+ intercept_command,
+ run_command,
+ ResultType,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+)
+
+from ..executor import (
+ generate_pip_install,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..coverage_util import (
+ coverage_context,
+)
+
+from ..venv import (
+ create_virtual_environment,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class ImportTest(SanityMultipleVersion):
+ """Sanity test for proper import exception handling."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' and
+ (is_subdir(target.path, data_context().content.module_path) or is_subdir(target.path, data_context().content.module_utils_path))]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ capture_pip = args.verbosity < 2
+
+ python = find_python(python_version)
+
+ if python_version.startswith('2.') and args.requirements:
+ # hack to make sure that virtualenv is available under Python 2.x
+ # on Python 3.x we can use the built-in venv
+ pip = generate_pip_command(python)
+ run_command(args, generate_pip_install(pip, '', packages=['virtualenv']), capture=capture_pip)
+
+ settings = self.load_processor(args, python_version)
+
+ paths = [target.path for target in targets.include]
+
+ env = ansible_environment(args, color=False)
+
+ temp_root = os.path.join(ResultType.TMP.path, 'sanity', 'import')
+
+ # create a clean virtual environment to minimize the available imports beyond the python standard library
+ virtual_environment_path = os.path.join(temp_root, 'minimal-py%s' % python_version.replace('.', ''))
+ virtual_environment_bin = os.path.join(virtual_environment_path, 'bin')
+
+ remove_tree(virtual_environment_path)
+
+ if not create_virtual_environment(args, python_version, virtual_environment_path):
+ display.warning("Skipping sanity test '%s' on Python %s due to missing virtual environment support." % (self.name, python_version))
+ return SanitySkipped(self.name, python_version)
+
+ # add the importer to our virtual environment so it can be accessed through the coverage injector
+ importer_path = os.path.join(virtual_environment_bin, 'importer.py')
+ yaml_to_json_path = os.path.join(virtual_environment_bin, 'yaml_to_json.py')
+ if not args.explain:
+ os.symlink(os.path.abspath(os.path.join(SANITY_ROOT, 'import', 'importer.py')), importer_path)
+ os.symlink(os.path.abspath(os.path.join(SANITY_ROOT, 'import', 'yaml_to_json.py')), yaml_to_json_path)
+
+ # activate the virtual environment
+ env['PATH'] = '%s:%s' % (virtual_environment_bin, env['PATH'])
+
+ env.update(
+ SANITY_TEMP_PATH=ResultType.TMP.path,
+ )
+
+ if data_context().content.collection:
+ env.update(
+ SANITY_COLLECTION_FULL_NAME=data_context().content.collection.full_name,
+ SANITY_EXTERNAL_PYTHON=python,
+ )
+
+ virtualenv_python = os.path.join(virtual_environment_bin, 'python')
+ virtualenv_pip = generate_pip_command(virtualenv_python)
+
+ # make sure coverage is available in the virtual environment if needed
+ if args.coverage:
+ run_command(args, generate_pip_install(virtualenv_pip, '', packages=['setuptools']), env=env, capture=capture_pip)
+ run_command(args, generate_pip_install(virtualenv_pip, '', packages=['coverage']), env=env, capture=capture_pip)
+
+ try:
+ # In some environments pkg_resources is installed as a separate pip package which needs to be removed.
+ # For example, using Python 3.8 on Ubuntu 18.04 a virtualenv is created with only pip and setuptools.
+ # However, a venv is created with an additional pkg-resources package which is independent of setuptools.
+ # Making sure pkg-resources is removed preserves the import test consistency between venv and virtualenv.
+ # Additionally, in the above example, the pyparsing package vendored with pkg-resources is out-of-date and generates deprecation warnings.
+ # Thus it is important to remove pkg-resources to prevent system installed packages from generating deprecation warnings.
+ run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'pkg-resources'], env=env, capture=capture_pip)
+ except SubprocessError:
+ pass
+
+ run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'setuptools'], env=env, capture=capture_pip)
+ run_command(args, virtualenv_pip + ['uninstall', '--disable-pip-version-check', '-y', 'pip'], env=env, capture=capture_pip)
+
+ cmd = ['importer.py']
+
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ results = []
+
+ try:
+ with coverage_context(args):
+ stdout, stderr = intercept_command(args, cmd, self.name, env, capture=True, data=data, python_version=python_version,
+ virtualenv=virtualenv_python)
+
+ if stdout or stderr:
+ raise SubprocessError(cmd, stdout=stdout, stderr=stderr)
+ except SubprocessError as ex:
+ if ex.status != 10 or ex.stderr or not ex.stdout:
+ raise
+
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, ex.stdout)
+
+ relative_temp_root = os.path.relpath(temp_root, data_context().content.root) + os.path.sep
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=os.path.relpath(r['path'], relative_temp_root) if r['path'].startswith(relative_temp_root) else r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ ) for r in results]
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results, python_version=python_version)
+
+ return SanitySuccess(self.name, python_version=python_version)
diff --git a/test/lib/ansible_test/_internal/sanity/integration_aliases.py b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
new file mode 100644
index 00000000..e21c093a
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/integration_aliases.py
@@ -0,0 +1,399 @@
+"""Sanity test to check integration test aliases."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import textwrap
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanityTargets,
+ SANITY_ROOT,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..target import (
+ filter_targets,
+ walk_posix_integration_targets,
+ walk_windows_integration_targets,
+ walk_integration_targets,
+ walk_module_targets,
+)
+
+from ..cloud import (
+ get_cloud_platforms,
+)
+
+from ..io import (
+ read_text_file,
+)
+
+from ..util import (
+ display,
+ find_python,
+ raw_command,
+)
+
+from ..util_common import (
+ write_json_test_results,
+ ResultType,
+)
+
+
+class IntegrationAliasesTest(SanityVersionNeutral):
+ """Sanity test to evaluate integration test aliases."""
+ CI_YML = '.azure-pipelines/azure-pipelines.yml'
+ TEST_ALIAS_PREFIX = 'shippable' # this will be changed at some point in the future
+
+ DISABLED = 'disabled/'
+ UNSTABLE = 'unstable/'
+ UNSUPPORTED = 'unsupported/'
+
+ EXPLAIN_URL = 'https://docs.ansible.com/ansible/devel/dev_guide/testing/sanity/integration-aliases.html'
+
+ TEMPLATE_DISABLED = """
+ The following integration tests are **disabled** [[explain]({explain_url}#disabled)]:
+
+ {tests}
+
+ Consider fixing the integration tests before or alongside changes.
+ """
+
+ TEMPLATE_UNSTABLE = """
+ The following integration tests are **unstable** [[explain]({explain_url}#unstable)]:
+
+ {tests}
+
+ Tests may need to be restarted due to failures unrelated to changes.
+ """
+
+ TEMPLATE_UNSUPPORTED = """
+ The following integration tests are **unsupported** [[explain]({explain_url}#unsupported)]:
+
+ {tests}
+
+ Consider running the tests manually or extending test infrastructure to add support.
+ """
+
+ TEMPLATE_UNTESTED = """
+ The following modules have **no integration tests** [[explain]({explain_url}#untested)]:
+
+ {tests}
+
+ Consider adding integration tests before or alongside changes.
+ """
+
+ ansible_only = True
+
+ def __init__(self):
+ super(IntegrationAliasesTest, self).__init__()
+
+ self._ci_config = {} # type: t.Dict[str, t.Any]
+ self._ci_test_groups = {} # type: t.Dict[str, t.List[int]]
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ def load_ci_config(self, args): # type: (SanityConfig) -> t.Dict[str, t.Any]
+ """Load and return the CI YAML configuration."""
+ if not self._ci_config:
+ self._ci_config = self.load_yaml(args, self.CI_YML)
+
+ return self._ci_config
+
+ @property
+ def ci_test_groups(self): # type: () -> t.Dict[str, t.List[int]]
+ """Return a dictionary of CI test names and their group(s)."""
+ if not self._ci_test_groups:
+ test_groups = {}
+
+ for stage in self._ci_config['stages']:
+ for job in stage['jobs']:
+ if job.get('template') != 'templates/matrix.yml':
+ continue
+
+ parameters = job['parameters']
+
+ groups = parameters.get('groups', [])
+ test_format = parameters.get('testFormat', '{0}')
+ test_group_format = parameters.get('groupFormat', '{0}/{{1}}')
+
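+                    # with the defaults above, a target 'centos7' in groups [1, 2] expands to 'centos7/1' and 'centos7/2'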
+ for target in parameters['targets']:
+ test = target.get('test') or target.get('name')
+
+ if groups:
+ tests_formatted = [test_group_format.format(test_format).format(test, group) for group in groups]
+ else:
+ tests_formatted = [test_format.format(test)]
+
+ for test_formatted in tests_formatted:
+ parts = test_formatted.split('/')
+ key = parts[0]
+
+ if key in ('sanity', 'units'):
+ continue
+
+ try:
+ group = int(parts[-1])
+ except ValueError:
+ continue
+
+ if group < 1 or group > 99:
+ continue
+
+ group_set = test_groups.setdefault(key, set())
+ group_set.add(group)
+
+ self._ci_test_groups = dict((key, sorted(value)) for key, value in test_groups.items())
+
+ return self._ci_test_groups
+
+ def format_test_group_alias(self, name, fallback=''):
+ """
+ :type name: str
+ :type fallback: str
+ :rtype: str
+ """
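+        # produces aliases such as 'shippable/windows/group[1-3]/' (illustrative), depending on the groups in the CI config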
+ group_numbers = self.ci_test_groups.get(name, None)
+
+ if group_numbers:
+ if min(group_numbers) != 1:
+ display.warning('Min test group "%s" in %s is %d instead of 1.' % (name, self.CI_YML, min(group_numbers)), unique=True)
+
+ if max(group_numbers) != len(group_numbers):
+ display.warning('Max test group "%s" in %s is %d instead of %d.' % (name, self.CI_YML, max(group_numbers), len(group_numbers)), unique=True)
+
+ if max(group_numbers) > 9:
+ alias = '%s/%s/group(%s)/' % (self.TEST_ALIAS_PREFIX, name, '|'.join(str(i) for i in range(min(group_numbers), max(group_numbers) + 1)))
+ elif len(group_numbers) > 1:
+ alias = '%s/%s/group[%d-%d]/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers), max(group_numbers))
+ else:
+ alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers))
+ elif fallback:
+ alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, fallback, 1)
+ else:
+ raise Exception('cannot find test group "%s" in %s' % (name, self.CI_YML))
+
+ return alias
+
+ def load_yaml(self, args, path): # type: (SanityConfig, str) -> t.Dict[str, t.Any]
+ """Load the specified YAML file and return the contents."""
+ yaml_to_json_path = os.path.join(SANITY_ROOT, self.name, 'yaml_to_json.py')
+ python = find_python(args.python_version)
+
+ return json.loads(raw_command([python, yaml_to_json_path], data=read_text_file(path), capture=True)[0])
+
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if not os.path.isfile(self.CI_YML):
+ return SanityFailure(self.name, messages=[SanityMessage(
+ message='file missing',
+ path=self.CI_YML,
+ )])
+
+ results = dict(
+ comments=[],
+ labels={},
+ )
+
+ self.load_ci_config(args)
+ self.check_changes(args, results)
+
+ write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results)
+
+ messages = []
+
+ messages += self.check_posix_targets(args)
+ messages += self.check_windows_targets()
+
+ if messages:
+ return SanityFailure(self.name, messages=messages)
+
+ return SanitySuccess(self.name)
+
+ def check_posix_targets(self, args):
+ """
+ :type args: SanityConfig
+ :rtype: list[SanityMessage]
+ """
+ posix_targets = tuple(walk_posix_integration_targets())
+
+ clouds = get_cloud_platforms(args, posix_targets)
+ cloud_targets = ['cloud/%s/' % cloud for cloud in clouds]
+
+ all_cloud_targets = tuple(filter_targets(posix_targets, ['cloud/'], include=True, directories=False, errors=False))
+ invalid_cloud_targets = tuple(filter_targets(all_cloud_targets, cloud_targets, include=False, directories=False, errors=False))
+
+ messages = []
+
+ for target in invalid_cloud_targets:
+ for alias in target.aliases:
+ if alias.startswith('cloud/') and alias != 'cloud/':
+ if any(alias.startswith(cloud_target) for cloud_target in cloud_targets):
+ continue
+
+ messages.append(SanityMessage('invalid alias `%s`' % alias, '%s/aliases' % target.path))
+
+ messages += self.check_ci_group(
+ targets=tuple(filter_targets(posix_targets, ['cloud/', '%s/generic/' % self.TEST_ALIAS_PREFIX], include=False,
+ directories=False, errors=False)),
+ find=self.format_test_group_alias('linux').replace('linux', 'posix'),
+ find_incidental=['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX],
+ )
+
+ messages += self.check_ci_group(
+ targets=tuple(filter_targets(posix_targets, ['%s/generic/' % self.TEST_ALIAS_PREFIX], include=True, directories=False,
+ errors=False)),
+ find=self.format_test_group_alias('generic'),
+ )
+
+ for cloud in clouds:
+ messages += self.check_ci_group(
+ targets=tuple(filter_targets(posix_targets, ['cloud/%s/' % cloud], include=True, directories=False, errors=False)),
+ find=self.format_test_group_alias(cloud, 'cloud'),
+ find_incidental=['%s/%s/incidental/' % (self.TEST_ALIAS_PREFIX, cloud), '%s/cloud/incidental/' % self.TEST_ALIAS_PREFIX],
+ )
+
+ return messages
+
+ def check_windows_targets(self):
+ """
+ :rtype: list[SanityMessage]
+ """
+ windows_targets = tuple(walk_windows_integration_targets())
+
+ messages = []
+
+ messages += self.check_ci_group(
+ targets=windows_targets,
+ find=self.format_test_group_alias('windows'),
+ find_incidental=['%s/windows/incidental/' % self.TEST_ALIAS_PREFIX],
+ )
+
+ return messages
+
+ def check_ci_group(self, targets, find, find_incidental=None):
+ """
+ :type targets: tuple[CompletionTarget]
+ :type find: str
+ :type find_incidental: list[str] | None
+ :rtype: list[SanityMessage]
+ """
+ all_paths = set(target.path for target in targets)
+ supported_paths = set(target.path for target in filter_targets(targets, [find], include=True, directories=False, errors=False))
+ unsupported_paths = set(target.path for target in filter_targets(targets, [self.UNSUPPORTED], include=True, directories=False, errors=False))
+
+ if find_incidental:
+ incidental_paths = set(target.path for target in filter_targets(targets, find_incidental, include=True, directories=False, errors=False))
+ else:
+ incidental_paths = set()
+
+ unassigned_paths = all_paths - supported_paths - unsupported_paths - incidental_paths
+ conflicting_paths = supported_paths & unsupported_paths
+
+ unassigned_message = 'missing alias `%s` or `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
+ conflicting_message = 'conflicting alias `%s` and `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
+
+ messages = []
+
+ for path in unassigned_paths:
+ messages.append(SanityMessage(unassigned_message, '%s/aliases' % path))
+
+ for path in conflicting_paths:
+ messages.append(SanityMessage(conflicting_message, '%s/aliases' % path))
+
+ return messages
+
+ def check_changes(self, args, results):
+ """
+ :type args: SanityConfig
+ :type results: dict[str, any]
+ """
+ integration_targets = list(walk_integration_targets())
+ module_targets = list(walk_module_targets())
+
+ integration_targets_by_name = dict((target.name, target) for target in integration_targets)
+ module_names_by_path = dict((target.path, target.module) for target in module_targets)
+
+ disabled_targets = []
+ unstable_targets = []
+ unsupported_targets = []
+
+ for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:
+ for target in args.metadata.change_description.focused_command_targets[command]:
+ if self.DISABLED in integration_targets_by_name[target].aliases:
+ disabled_targets.append(target)
+ elif self.UNSTABLE in integration_targets_by_name[target].aliases:
+ unstable_targets.append(target)
+ elif self.UNSUPPORTED in integration_targets_by_name[target].aliases:
+ unsupported_targets.append(target)
+
+ untested_modules = []
+
+ for path in args.metadata.change_description.no_integration_paths:
+ module = module_names_by_path.get(path)
+
+ if module:
+ untested_modules.append(module)
+
+ comments = [
+ self.format_comment(self.TEMPLATE_DISABLED, disabled_targets),
+ self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets),
+ self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets),
+ self.format_comment(self.TEMPLATE_UNTESTED, untested_modules),
+ ]
+
+ comments = [comment for comment in comments if comment]
+
+ labels = dict(
+ needs_tests=bool(untested_modules),
+ disabled_tests=bool(disabled_targets),
+ unstable_tests=bool(unstable_targets),
+ unsupported_tests=bool(unsupported_targets),
+ )
+
+ results['comments'] += comments
+ results['labels'].update(labels)
+
+ def format_comment(self, template, targets):
+ """
+ :type template: str
+ :type targets: list[str]
+ :rtype: str | None
+ """
+ if not targets:
+ return None
+
+ tests = '\n'.join('- %s' % target for target in targets)
+
+ data = dict(
+ explain_url=self.EXPLAIN_URL,
+ tests=tests,
+ )
+
+ message = textwrap.dedent(template).strip().format(**data)
+
+ return message
diff --git a/test/lib/ansible_test/_internal/sanity/pep8.py b/test/lib/ansible_test/_internal/sanity/pep8.py
new file mode 100644
index 00000000..9eb40dbc
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/pep8.py
@@ -0,0 +1,109 @@
+"""Sanity test for PEP 8 style guidelines using pycodestyle."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ read_lines_without_comments,
+ parse_to_list_of_dict,
+ find_python,
+ is_subdir,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class Pep8Test(SanitySingleVersion):
+ """Sanity test for PEP 8 style guidelines using pycodestyle."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'A100'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ current_ignore_file = os.path.join(SANITY_ROOT, 'pep8', 'current-ignore.txt')
+ current_ignore = sorted(read_lines_without_comments(current_ignore_file, remove_blank_lines=True))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [
+ find_python(python_version),
+ '-m', 'pycodestyle',
+ '--max-line-length', '160',
+ '--config', '/dev/null',
+ '--ignore', ','.join(sorted(current_ignore)),
+ ] + paths
+
+ if paths:
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ else:
+ stdout = None
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ if stdout:
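+            # pycodestyle output format, e.g. "lib/ansible/foo.py:10:5: E111 indentation is not a multiple of four"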
+ pattern = '^(?P<path>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+): (?P<code>[WE][0-9]{3}) (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stdout)
+ else:
+ results = []
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ level='warning' if r['code'].startswith('W') else 'error',
+ code=r['code'],
+ ) for r in results]
+
+ errors = settings.process_errors(results, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/pslint.py b/test/lib/ansible_test/_internal/sanity/pslint.py
new file mode 100644
index 00000000..256eee04
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/pslint.py
@@ -0,0 +1,121 @@
+"""Sanity test using PSScriptAnalyzer."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import re
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ find_executable,
+ ANSIBLE_TEST_DATA_ROOT,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class PslintTest(SanityVersionNeutral):
+ """Sanity test using PSScriptAnalyzer."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'AnsibleTest'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1', '.psd1')]
+
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if not find_executable('pwsh', required='warning'):
+ return SanitySkipped(self.name)
+
+ cmds = []
+
+ if args.requirements:
+ cmds.append([os.path.join(ANSIBLE_TEST_DATA_ROOT, 'requirements', 'sanity.ps1')])
+
+ cmds.append([os.path.join(SANITY_ROOT, 'pslint', 'pslint.ps1')] + paths)
+
+ stdout = ''
+
+ for cmd in cmds:
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ severity = [
+ 'Information',
+ 'Warning',
+ 'Error',
+ 'ParseError',
+ ]
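+        # PSScriptAnalyzer reports severity as an integer, used below as an index into the list above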
+
+ cwd = data_context().content.root + '/'
+
+ # replace unicode smart quotes and ellipsis with ascii versions
+ stdout = re.sub(u'[\u2018\u2019]', "'", stdout)
+ stdout = re.sub(u'[\u201c\u201d]', '"', stdout)
+ stdout = re.sub(u'[\u2026]', '...', stdout)
+
+ messages = json.loads(stdout)
+
+ errors = [SanityMessage(
+ code=m['RuleName'],
+ message=m['Message'],
+ path=m['ScriptPath'].replace(cwd, ''),
+ line=m['Line'] or 0,
+ column=m['Column'] or 0,
+ level=severity[m['Severity']],
+ ) for m in messages]
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/pylint.py b/test/lib/ansible_test/_internal/sanity/pylint.py
new file mode 100644
index 00000000..324e5873
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/pylint.py
@@ -0,0 +1,289 @@
+"""Sanity test using pylint."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import itertools
+import json
+import os
+import datetime
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ ConfigParser,
+ is_subdir,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+ get_collection_detail,
+ CollectionDetail,
+ CollectionDetailError,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class PylintTest(SanitySingleVersion):
+ """Sanity test using pylint."""
+
+ def __init__(self):
+ super(PylintTest, self).__init__()
+ self.optional_error_codes.update([
+ 'ansible-deprecated-date',
+ 'too-complex',
+ ])
+
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'ansible-test'
+
+ @property
+ def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]
+ """A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
+ # Python 3.9 is not supported on pylint < 2.5.0.
+ # Unfortunately pylint 2.5.0 and later include an unfixed regression.
+ # See: https://github.com/PyCQA/pylint/issues/3701
+ return tuple(python_version for python_version in super(PylintTest, self).supported_python_versions if python_version not in ('3.9',))
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.py' or is_subdir(target.path, 'bin')]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ plugin_dir = os.path.join(SANITY_ROOT, 'pylint', 'plugins')
+        plugin_names = sorted(name for name, ext in (
+            os.path.splitext(filename) for filename in os.listdir(plugin_dir)) if ext == '.py' and name != '__init__')
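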
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ module_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in
+ paths if is_subdir(p, data_context().content.module_path)]
+ module_dirs = sorted(set([p[0] for p in module_paths if len(p) > 1]))
+
+ large_module_group_threshold = 500
+ large_module_groups = [key for key, value in
+ itertools.groupby(module_paths, lambda p: p[0] if len(p) > 1 else '') if len(list(value)) > large_module_group_threshold]
+
+ large_module_group_paths = [os.path.relpath(p, data_context().content.module_path).split(os.path.sep) for p in paths
+ if any(is_subdir(p, os.path.join(data_context().content.module_path, g)) for g in large_module_groups)]
+ large_module_group_dirs = sorted(set([os.path.sep.join(p[:2]) for p in large_module_group_paths if len(p) > 2]))
+
+ contexts = []
+ remaining_paths = set(paths)
+
+ def add_context(available_paths, context_name, context_filter):
+ """
+ :type available_paths: set[str]
+ :type context_name: str
+ :type context_filter: (str) -> bool
+ """
+ filtered_paths = set(p for p in available_paths if context_filter(p))
+ contexts.append((context_name, sorted(filtered_paths)))
+ available_paths -= filtered_paths
+
+ def filter_path(path_filter=None):
+ """
+ :type path_filter: str
+ :rtype: (str) -> bool
+ """
+ def context_filter(path_to_filter):
+ """
+ :type path_to_filter: str
+ :rtype: bool
+ """
+ return is_subdir(path_to_filter, path_filter)
+
+ return context_filter
+
+ for large_module_dir in large_module_group_dirs:
+ add_context(remaining_paths, 'modules/%s' % large_module_dir, filter_path(os.path.join(data_context().content.module_path, large_module_dir)))
+
+ for module_dir in module_dirs:
+ add_context(remaining_paths, 'modules/%s' % module_dir, filter_path(os.path.join(data_context().content.module_path, module_dir)))
+
+ add_context(remaining_paths, 'modules', filter_path(data_context().content.module_path))
+ add_context(remaining_paths, 'module_utils', filter_path(data_context().content.module_utils_path))
+
+ add_context(remaining_paths, 'units', filter_path(data_context().content.unit_path))
+
+ if data_context().content.collection:
+ add_context(remaining_paths, 'collection', lambda p: True)
+ else:
+ add_context(remaining_paths, 'validate-modules', filter_path('test/lib/ansible_test/_data/sanity/validate-modules/'))
+ add_context(remaining_paths, 'validate-modules-unit', filter_path('test/lib/ansible_test/tests/validate-modules-unit/'))
+ add_context(remaining_paths, 'sanity', filter_path('test/lib/ansible_test/_data/sanity/'))
+ add_context(remaining_paths, 'ansible-test', filter_path('test/lib/'))
+ add_context(remaining_paths, 'test', filter_path('test/'))
+ add_context(remaining_paths, 'hacking', filter_path('hacking/'))
+ add_context(remaining_paths, 'ansible', lambda p: True)
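+        # every path has now been claimed by exactly one context; the catch-all contexts above
+        # pick up whatever the more specific filters left behind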
+
+ messages = []
+ context_times = []
+
+ python = find_python(python_version)
+
+ collection_detail = None
+
+ if data_context().content.collection:
+ try:
+ collection_detail = get_collection_detail(args, python)
+
+ if not collection_detail.version:
+ display.warning('Skipping pylint collection version checks since no collection version was found.')
+ except CollectionDetailError as ex:
+ display.warning('Skipping pylint collection version checks since collection detail loading failed: %s' % ex.reason)
+
+ test_start = datetime.datetime.utcnow()
+
+ for context, context_paths in sorted(contexts):
+ if not context_paths:
+ continue
+
+ context_start = datetime.datetime.utcnow()
+ messages += self.pylint(args, context, context_paths, plugin_dir, plugin_names, python, collection_detail)
+ context_end = datetime.datetime.utcnow()
+
+ context_times.append('%s: %d (%s)' % (context, len(context_paths), context_end - context_start))
+
+ test_end = datetime.datetime.utcnow()
+
+ for context_time in context_times:
+ display.info(context_time, verbosity=4)
+
+ display.info('total: %d (%s)' % (len(paths), test_end - test_start), verbosity=4)
+
+ errors = [SanityMessage(
+ message=m['message'].replace('\n', ' '),
+ path=m['path'],
+ line=int(m['line']),
+ column=int(m['column']),
+ level=m['type'],
+ code=m['symbol'],
+ ) for m in messages]
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
+
+ @staticmethod
+ def pylint(
+ args, # type: SanityConfig
+ context, # type: str
+ paths, # type: t.List[str]
+ plugin_dir, # type: str
+ plugin_names, # type: t.List[str]
+ python, # type: str
+ collection_detail, # type: CollectionDetail
+ ): # type: (...) -> t.List[t.Dict[str, str]]
+ """Run pylint using the config specified by the context on the specified paths."""
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', context.split('/')[0] + '.cfg')
+
+ if not os.path.exists(rcfile):
+ if data_context().content.collection:
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'collection.cfg')
+ else:
+ rcfile = os.path.join(SANITY_ROOT, 'pylint', 'config', 'default.cfg')
+
+ parser = ConfigParser()
+ parser.read(rcfile)
+
+ if parser.has_section('ansible-test'):
+ config = dict(parser.items('ansible-test'))
+ else:
+ config = dict()
+
+ disable_plugins = set(i.strip() for i in config.get('disable-plugins', '').split(',') if i)
+ load_plugins = set(plugin_names + ['pylint.extensions.mccabe']) - disable_plugins
+
+ cmd = [
+ python,
+ '-m', 'pylint',
+ '--jobs', '0',
+ '--reports', 'n',
+ '--max-line-length', '160',
+ '--max-complexity', '20',
+ '--rcfile', rcfile,
+ '--output-format', 'json',
+ '--load-plugins', ','.join(load_plugins),
+ ] + paths
+
+ if data_context().content.collection:
+ cmd.extend(['--collection-name', data_context().content.collection.full_name])
+
+ if collection_detail and collection_detail.version:
+ cmd.extend(['--collection-version', collection_detail.version])
+
+ append_python_path = [plugin_dir]
+
+ if data_context().content.collection:
+ append_python_path.append(data_context().content.collection.root)
+
+ env = ansible_environment(args)
+ env['PYTHONPATH'] += os.path.pathsep + os.path.pathsep.join(append_python_path)
+
+ # expose plugin paths for use in custom plugins
+ env.update(dict(('ANSIBLE_TEST_%s_PATH' % k.upper(), os.path.abspath(v) + os.path.sep) for k, v in data_context().content.plugin_paths.items()))
+
+ if paths:
+ display.info('Checking %d file(s) in context "%s" with config: %s' % (len(paths), context, rcfile), verbosity=1)
+
+ try:
+ stdout, stderr = run_command(args, cmd, env=env, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
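+            # pylint exit codes are bit flags: 1=fatal, 2=error, 4=warning, 8=refactor, 16=convention, 32=usage error
+            # a status at or above 32 therefore means pylint itself failed rather than reported issues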
+ if stderr or status >= 32:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+ else:
+ stdout = None
+
+ if not args.explain and stdout:
+ messages = json.loads(stdout)
+ else:
+ messages = []
+
+ return messages
diff --git a/test/lib/ansible_test/_internal/sanity/rstcheck.py b/test/lib/ansible_test/_internal/sanity/rstcheck.py
new file mode 100644
index 00000000..2d8a01d5
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/rstcheck.py
@@ -0,0 +1,95 @@
+"""Sanity test using rstcheck."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ parse_to_list_of_dict,
+ read_lines_without_comments,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class RstcheckTest(SanitySingleVersion):
+ """Sanity test using rstcheck."""
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] in ('.rst',)]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ ignore_file = os.path.join(SANITY_ROOT, 'rstcheck', 'ignore-substitutions.txt')
+ ignore_substitutions = sorted(set(read_lines_without_comments(ignore_file, remove_blank_lines=True)))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ cmd = [
+ find_python(python_version),
+ '-m', 'rstcheck',
+ '--report', 'warning',
+ '--ignore-substitutions', ','.join(ignore_substitutions),
+ ] + paths
+
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
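+        # rstcheck reports its findings on stderr; any stdout output is unexpected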
+ if stdout:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
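+        # docutils-style messages, e.g. "docs/foo.rst:12: (WARNING/2) Title underline too short."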
+ pattern = r'^(?P<path>[^:]*):(?P<line>[0-9]+): \((?P<level>INFO|WARNING|ERROR|SEVERE)/[0-4]\) (?P<message>.*)$'
+
+ results = parse_to_list_of_dict(pattern, stderr)
+
+ results = [SanityMessage(
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=0,
+ level=r['level'],
+ ) for r in results]
+
+ settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/sanity_docs.py b/test/lib/ansible_test/_internal/sanity/sanity_docs.py
new file mode 100644
index 00000000..44638075
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/sanity_docs.py
@@ -0,0 +1,62 @@
+"""Sanity test for documentation of sanity tests."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ sanity_get_tests,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class SanityDocsTest(SanityVersionNeutral):
+ """Sanity test for documentation of sanity tests."""
+ ansible_only = True
+
+ @property
+ def can_ignore(self): # type: () -> bool
+ """True if the test supports ignore entries."""
+ return False
+
+ @property
+ def no_targets(self): # type: () -> bool
+ """True if the test does not use test targets. Mutually exclusive with all_targets."""
+ return True
+
+ # noinspection PyUnusedLocal
+ def test(self, args, targets): # pylint: disable=locally-disabled, unused-argument
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ sanity_dir = 'docs/docsite/rst/dev_guide/testing/sanity'
+ sanity_docs = set(part[0] for part in (os.path.splitext(os.path.basename(path)) for path in data_context().content.get_files(sanity_dir))
+ if part[1] == '.rst')
+ sanity_tests = set(sanity_test.name for sanity_test in sanity_get_tests())
+
+ missing = sanity_tests - sanity_docs
+
+ results = []
+
+ results += [SanityMessage(
+ message='missing docs for ansible-test sanity --test %s' % r,
+ path=os.path.join(sanity_dir, '%s.rst' % r),
+ ) for r in sorted(missing)]
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/shellcheck.py b/test/lib/ansible_test/_internal/sanity/shellcheck.py
new file mode 100644
index 00000000..82689ced
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/shellcheck.py
@@ -0,0 +1,110 @@
+"""Sanity test using shellcheck."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from xml.etree.ElementTree import (
+ fromstring,
+ Element,
+)
+
+from .. import types as t
+
+from ..sanity import (
+ SanityVersionNeutral,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SanitySkipped,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ read_lines_without_comments,
+ find_executable,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+
+class ShellcheckTest(SanityVersionNeutral):
+ """Sanity test using shellcheck."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'AT1000'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if os.path.splitext(target.path)[1] == '.sh']
+
+ def test(self, args, targets):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :rtype: TestResult
+ """
+ exclude_file = os.path.join(SANITY_ROOT, 'shellcheck', 'exclude.txt')
+ exclude = set(read_lines_without_comments(exclude_file, remove_blank_lines=True, optional=True))
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ if not find_executable('shellcheck', required='warning'):
+ return SanitySkipped(self.name)
+
+ cmd = [
+ 'shellcheck',
+ '-e', ','.join(sorted(exclude)),
+ '--format', 'checkstyle',
+ ] + paths
+
+ try:
+ stdout, stderr = run_command(args, cmd, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
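+        # per the shellcheck manual, status 0 means no issues and 1 means issues were found;
+        # anything higher indicates shellcheck itself could not complete the scan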
+ if stderr or status > 1:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ # json output is missing file paths in older versions of shellcheck, so we'll use xml instead
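+        # the checkstyle output is roughly: <checkstyle><file name="test.sh"><error line="3" column="7"
+        #     severity="warning" message="..." source="ShellCheck.SC2034"/></file></checkstyle>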
+ root = fromstring(stdout) # type: Element
+
+ results = []
+
+ for item in root: # type: Element
+ for entry in item: # type: Element
+ results.append(SanityMessage(
+ message=entry.attrib['message'],
+ path=item.attrib['name'],
+ line=int(entry.attrib['line']),
+ column=int(entry.attrib['column']),
+ level=entry.attrib['severity'],
+ code=entry.attrib['source'].replace('ShellCheck.', ''),
+ ))
+
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/validate_modules.py b/test/lib/ansible_test/_internal/sanity/validate_modules.py
new file mode 100644
index 00000000..add3cdc7
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/validate_modules.py
@@ -0,0 +1,149 @@
+"""Sanity test using validate-modules."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from .. import types as t
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+ get_collection_detail,
+ CollectionDetailError,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..ci import (
+ get_ci_provider,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class ValidateModulesTest(SanitySingleVersion):
+ """Sanity test using validate-modules."""
+
+ def __init__(self):
+ super(ValidateModulesTest, self).__init__()
+ self.optional_error_codes.update([
+ 'deprecated-date',
+ ])
+
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'A100'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ return [target for target in targets if target.module]
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ env = ansible_environment(args, color=False)
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ python = find_python(python_version)
+
+ cmd = [
+ python,
+ os.path.join(SANITY_ROOT, 'validate-modules', 'validate-modules'),
+ '--format', 'json',
+ '--arg-spec',
+ ] + paths
+
+ if data_context().content.collection:
+ cmd.extend(['--collection', data_context().content.collection.directory])
+
+ try:
+ collection_detail = get_collection_detail(args, python)
+
+ if collection_detail.version:
+ cmd.extend(['--collection-version', collection_detail.version])
+ else:
+ display.warning('Skipping validate-modules collection version checks since no collection version was found.')
+ except CollectionDetailError as ex:
+ display.warning('Skipping validate-modules collection version checks since collection detail loading failed: %s' % ex.reason)
+ else:
+ base_branch = args.base_branch or get_ci_provider().get_base_branch()
+
+ if base_branch:
+ cmd.extend([
+ '--base-branch', base_branch,
+ ])
+ else:
+ display.warning('Cannot perform module comparison against the base branch because the base branch was not detected.')
+
+ try:
+ stdout, stderr = run_command(args, cmd, env=env, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
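+        # validate-modules uses exit status 3 to signal that findings were reported,
+        # so only other non-zero statuses are treated as unexpected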
+ if stderr or status not in (0, 3):
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return SanitySuccess(self.name)
+
+ messages = json.loads(stdout)
+
+ errors = []
+
+ for filename in messages:
+ output = messages[filename]
+
+ for item in output['errors']:
+ errors.append(SanityMessage(
+ path=filename,
+ line=int(item['line']) if 'line' in item else 0,
+ column=int(item['column']) if 'column' in item else 0,
+ level='error',
+ code='%s' % item['code'],
+ message=item['msg'],
+ ))
+
+ errors = settings.process_errors(errors, paths)
+
+ if errors:
+ return SanityFailure(self.name, messages=errors)
+
+ return SanitySuccess(self.name)
diff --git a/test/lib/ansible_test/_internal/sanity/yamllint.py b/test/lib/ansible_test/_internal/sanity/yamllint.py
new file mode 100644
index 00000000..85a576d0
--- /dev/null
+++ b/test/lib/ansible_test/_internal/sanity/yamllint.py
@@ -0,0 +1,136 @@
+"""Sanity test using yamllint."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from .. import types as t
+
+from .. import ansible_util
+
+from ..sanity import (
+ SanitySingleVersion,
+ SanityMessage,
+ SanityFailure,
+ SanitySkipped,
+ SanitySuccess,
+ SANITY_ROOT,
+)
+
+from ..target import (
+ TestTarget,
+)
+
+from ..util import (
+ SubprocessError,
+ display,
+ is_subdir,
+ find_python,
+)
+
+from ..util_common import (
+ run_command,
+)
+
+from ..config import (
+ SanityConfig,
+)
+
+from ..data import (
+ data_context,
+)
+
+
+class YamllintTest(SanitySingleVersion):
+ """Sanity test using yamllint."""
+ @property
+ def error_code(self): # type: () -> t.Optional[str]
+ """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
+ return 'ansible-test'
+
+ def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
+ """Return the given list of test targets, filtered to include only those relevant for the test."""
+ yaml_targets = [target for target in targets if os.path.splitext(target.path)[1] in ('.yml', '.yaml')]
+
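+        # python plugin sources are included as well, since the yamllinter also checks the
+        # YAML embedded in their DOCUMENTATION, EXAMPLES and RETURN blocks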
+ for plugin_type, plugin_path in sorted(data_context().content.plugin_paths.items()):
+ if plugin_type == 'module_utils':
+ continue
+
+ yaml_targets.extend([target for target in targets if
+ os.path.splitext(target.path)[1] == '.py' and
+ os.path.basename(target.path) != '__init__.py' and
+ is_subdir(target.path, plugin_path)])
+
+ return yaml_targets
+
+ def test(self, args, targets, python_version):
+ """
+ :type args: SanityConfig
+ :type targets: SanityTargets
+ :type python_version: str
+ :rtype: TestResult
+ """
+ pyyaml_presence = ansible_util.check_pyyaml(args, python_version, quiet=True)
+ if not pyyaml_presence['cloader']:
+ display.warning("Skipping sanity test '%s' due to missing libyaml support in PyYAML."
+ % self.name)
+ return SanitySkipped(self.name)
+
+ settings = self.load_processor(args)
+
+ paths = [target.path for target in targets.include]
+
+ python = find_python(python_version)
+
+ results = self.test_paths(args, paths, python)
+ results = settings.process_errors(results, paths)
+
+ if results:
+ return SanityFailure(self.name, messages=results)
+
+ return SanitySuccess(self.name)
+
+ @staticmethod
+ def test_paths(args, paths, python):
+ """
+ :type args: SanityConfig
+ :type paths: list[str]
+ :type python: str
+ :rtype: list[SanityMessage]
+ """
+ cmd = [
+ python,
+ os.path.join(SANITY_ROOT, 'yamllint', 'yamllinter.py'),
+ ]
+
+ data = '\n'.join(paths)
+
+ display.info(data, verbosity=4)
+
+ try:
+ stdout, stderr = run_command(args, cmd, data=data, capture=True)
+ status = 0
+ except SubprocessError as ex:
+ stdout = ex.stdout
+ stderr = ex.stderr
+ status = ex.status
+
+ if stderr:
+ raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
+
+ if args.explain:
+ return []
+
+ results = json.loads(stdout)['messages']
+
+ results = [SanityMessage(
+ code=r['code'],
+ message=r['message'],
+ path=r['path'],
+ line=int(r['line']),
+ column=int(r['column']),
+ level=r['level'],
+ ) for r in results]
+
+ return results
diff --git a/test/lib/ansible_test/_internal/target.py b/test/lib/ansible_test/_internal/target.py
new file mode 100644
index 00000000..7bafd717
--- /dev/null
+++ b/test/lib/ansible_test/_internal/target.py
@@ -0,0 +1,694 @@
+"""Test target identification, iteration and inclusion/exclusion."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import collections
+import os
+import re
+import itertools
+import abc
+
+from . import types as t
+
+from .encoding import (
+ to_bytes,
+ to_text,
+)
+
+from .io import (
+ read_text_file,
+)
+
+from .util import (
+ ApplicationError,
+ display,
+ read_lines_without_comments,
+ is_subdir,
+)
+
+from .data import (
+ data_context,
+)
+
+MODULE_EXTENSIONS = '.py', '.ps1'
+
+try:
+ TCompletionTarget = t.TypeVar('TCompletionTarget', bound='CompletionTarget')
+except AttributeError:
+ TCompletionTarget = None # pylint: disable=invalid-name
+
+try:
+ TIntegrationTarget = t.TypeVar('TIntegrationTarget', bound='IntegrationTarget')
+except AttributeError:
+ TIntegrationTarget = None # pylint: disable=invalid-name
+
+
+def find_target_completion(target_func, prefix):
+ """
+ :type target_func: () -> collections.Iterable[CompletionTarget]
+ :type prefix: unicode
+ :rtype: list[str]
+ """
+ try:
+ targets = target_func()
+ short = os.environ.get('COMP_TYPE') == '63' # double tab completion from bash
+ matches = walk_completion_targets(targets, prefix, short)
+ return matches
+ except Exception as ex: # pylint: disable=locally-disabled, broad-except
+ return [u'%s' % ex]
+
+
+def walk_completion_targets(targets, prefix, short=False):
+ """
+ :type targets: collections.Iterable[CompletionTarget]
+ :type prefix: str
+ :type short: bool
+ :rtype: tuple[str]
+ """
+ aliases = set(alias for target in targets for alias in target.aliases)
+
+ if prefix.endswith('/') and prefix in aliases:
+ aliases.remove(prefix)
+
+ matches = [alias for alias in aliases if alias.startswith(prefix) and '/' not in alias[len(prefix):-1]]
+
+ if short:
+ offset = len(os.path.dirname(prefix))
+ if offset:
+ offset += 1
+ relative_matches = [match[offset:] for match in matches if len(match) > offset]
+ if len(relative_matches) > 1:
+ matches = relative_matches
+
+ return tuple(sorted(matches))
+
+
+def walk_internal_targets(targets, includes=None, excludes=None, requires=None):
+ """
+ :type targets: collections.Iterable[T <= CompletionTarget]
+ :type includes: list[str]
+ :type excludes: list[str]
+ :type requires: list[str]
+ :rtype: tuple[T <= CompletionTarget]
+ """
+ targets = tuple(targets)
+
+ include_targets = sorted(filter_targets(targets, includes, errors=True, directories=False), key=lambda include_target: include_target.name)
+
+ if requires:
+ require_targets = set(filter_targets(targets, requires, errors=True, directories=False))
+ include_targets = [require_target for require_target in include_targets if require_target in require_targets]
+
+ if excludes:
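+        # evaluated only for its side effect: errors=True raises if an exclude pattern matches nothing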
+ list(filter_targets(targets, excludes, errors=True, include=False, directories=False))
+
+ internal_targets = set(filter_targets(include_targets, excludes, errors=False, include=False, directories=False))
+ return tuple(sorted(internal_targets, key=lambda sort_target: sort_target.name))
+
+
+def filter_targets(targets, # type: t.Iterable[TCompletionTarget]
+ patterns, # type: t.List[str]
+ include=True, # type: bool
+ directories=True, # type: bool
+ errors=True, # type: bool
+ ): # type: (...) -> t.Iterable[TCompletionTarget]
+ """Iterate over the given targets and filter them based on the supplied arguments."""
+ unmatched = set(patterns or ())
+ compiled_patterns = dict((p, re.compile('^%s$' % p)) for p in patterns) if patterns else None
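+    # each pattern is compiled anchored (^...$), so it must match a target alias in full;
+    # regex syntax such as 'net_.*' can still be used for partial matching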
+
+ for target in targets:
+ matched_directories = set()
+ match = False
+
+ if patterns:
+ for alias in target.aliases:
+ for pattern in patterns:
+ if compiled_patterns[pattern].match(alias):
+ match = True
+
+ try:
+ unmatched.remove(pattern)
+ except KeyError:
+ pass
+
+ if alias.endswith('/'):
+ if target.base_path and len(target.base_path) > len(alias):
+ matched_directories.add(target.base_path)
+ else:
+ matched_directories.add(alias)
+ elif include:
+ match = True
+ if not target.base_path:
+ matched_directories.add('.')
+ for alias in target.aliases:
+ if alias.endswith('/'):
+ if target.base_path and len(target.base_path) > len(alias):
+ matched_directories.add(target.base_path)
+ else:
+ matched_directories.add(alias)
+
+ if match != include:
+ continue
+
+ if directories and matched_directories:
+ yield DirectoryTarget(to_text(sorted(matched_directories, key=len)[0]), target.modules)
+ else:
+ yield target
+
+ if errors:
+ if unmatched:
+ raise TargetPatternsNotMatched(unmatched)
+
+
+def walk_module_targets():
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ for target in walk_test_targets(path=data_context().content.module_path, module_path=data_context().content.module_path, extensions=MODULE_EXTENSIONS):
+ if not target.module:
+ continue
+
+ yield target
+
+
+def walk_units_targets():
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(path=data_context().content.unit_path, module_path=data_context().content.unit_module_path, extensions=('.py',), prefix='test_')
+
+
+def walk_compile_targets(include_symlinks=True):
+ """
+ :type include_symlinks: bool
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(module_path=data_context().content.module_path, extensions=('.py',), extra_dirs=('bin',), include_symlinks=include_symlinks)
+
+
+def walk_powershell_targets(include_symlinks=True):
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(module_path=data_context().content.module_path, extensions=('.ps1', '.psm1'), include_symlinks=include_symlinks)
+
+
+def walk_sanity_targets():
+ """
+ :rtype: collections.Iterable[TestTarget]
+ """
+ return walk_test_targets(module_path=data_context().content.module_path, include_symlinks=True, include_symlinked_directories=True)
+
+
+def walk_posix_integration_targets(include_hidden=False):
+ """
+ :type include_hidden: bool
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ for target in walk_integration_targets():
+ if 'posix/' in target.aliases or (include_hidden and 'hidden/posix/' in target.aliases):
+ yield target
+
+
+def walk_network_integration_targets(include_hidden=False):
+ """
+ :type include_hidden: bool
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ for target in walk_integration_targets():
+ if 'network/' in target.aliases or (include_hidden and 'hidden/network/' in target.aliases):
+ yield target
+
+
+def walk_windows_integration_targets(include_hidden=False):
+ """
+ :type include_hidden: bool
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ for target in walk_integration_targets():
+ if 'windows/' in target.aliases or (include_hidden and 'hidden/windows/' in target.aliases):
+ yield target
+
+
+def walk_integration_targets():
+ """
+ :rtype: collections.Iterable[IntegrationTarget]
+ """
+ path = data_context().content.integration_targets_path
+ modules = frozenset(target.module for target in walk_module_targets())
+ paths = data_context().content.walk_files(path)
+ prefixes = load_integration_prefixes()
+ targets_path_tuple = tuple(path.split(os.path.sep))
+
+ entry_dirs = (
+ 'defaults',
+ 'files',
+ 'handlers',
+ 'meta',
+ 'tasks',
+ 'templates',
+ 'vars',
+ )
+
+ entry_files = (
+ 'main.yml',
+ 'main.yaml',
+ )
+
+ entry_points = []
+
+ for entry_dir in entry_dirs:
+ for entry_file in entry_files:
+ entry_points.append(os.path.join(os.path.sep, entry_dir, entry_file))
+
+ # any directory with at least one file is a target
+ path_tuples = set(tuple(os.path.dirname(p).split(os.path.sep))
+ for p in paths)
+
+ # also detect targets which are ansible roles, looking for standard entry points
+ path_tuples.update(tuple(os.path.dirname(os.path.dirname(p)).split(os.path.sep))
+ for p in paths if any(p.endswith(entry_point) for entry_point in entry_points))
+
+ # remove the top-level directory if it was included
+ if targets_path_tuple in path_tuples:
+ path_tuples.remove(targets_path_tuple)
+
+ previous_path_tuple = None
+ paths = []
+
+ for path_tuple in sorted(path_tuples):
+ if previous_path_tuple and previous_path_tuple == path_tuple[:len(previous_path_tuple)]:
+ # ignore nested directories
+ continue
+
+ previous_path_tuple = path_tuple
+ paths.append(os.path.sep.join(path_tuple))
+
+ for path in paths:
+ yield IntegrationTarget(to_text(path), modules, prefixes)
+
+
+def load_integration_prefixes():
+ """
+ :rtype: dict[str, str]
+ """
+ path = data_context().content.integration_path
+ file_paths = sorted(f for f in data_context().content.get_files(path) if os.path.splitext(os.path.basename(f))[0] == 'target-prefixes')
+ prefixes = {}
+
+ for file_path in file_paths:
+ prefix = os.path.splitext(file_path)[1][1:]
+ prefixes.update(dict((k, prefix) for k in read_text_file(file_path).splitlines()))
+
+ return prefixes
+
+
+def walk_test_targets(path=None, module_path=None, extensions=None, prefix=None, extra_dirs=None, include_symlinks=False, include_symlinked_directories=False):
+ """
+ :type path: str | None
+ :type module_path: str | None
+ :type extensions: tuple[str] | None
+ :type prefix: str | None
+ :type extra_dirs: tuple[str] | None
+ :type include_symlinks: bool
+ :type include_symlinked_directories: bool
+ :rtype: collections.Iterable[TestTarget]
+ """
+ if path:
+ file_paths = data_context().content.walk_files(path, include_symlinked_directories=include_symlinked_directories)
+ else:
+ file_paths = data_context().content.all_files(include_symlinked_directories=include_symlinked_directories)
+
+ for file_path in file_paths:
+ name, ext = os.path.splitext(os.path.basename(file_path))
+
+ if extensions and ext not in extensions:
+ continue
+
+ if prefix and not name.startswith(prefix):
+ continue
+
+ symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
+
+ if symlink and not include_symlinks:
+ continue
+
+ yield TestTarget(to_text(file_path), module_path, prefix, path, symlink)
+
+ file_paths = []
+
+ if extra_dirs:
+ for extra_dir in extra_dirs:
+ for file_path in data_context().content.get_files(extra_dir):
+ file_paths.append(file_path)
+
+ for file_path in file_paths:
+ symlink = os.path.islink(to_bytes(file_path.rstrip(os.path.sep)))
+
+ if symlink and not include_symlinks:
+ continue
+
+ yield TestTarget(file_path, module_path, prefix, path, symlink)
+
+
+def analyze_integration_target_dependencies(integration_targets):
+ """
+ :type integration_targets: list[IntegrationTarget]
+ :rtype: dict[str,set[str]]
+ """
+ real_target_root = os.path.realpath(data_context().content.integration_targets_path) + '/'
+
+ role_targets = [target for target in integration_targets if target.type == 'role']
+ hidden_role_target_names = set(target.name for target in role_targets if 'hidden/' in target.aliases)
+
+ dependencies = collections.defaultdict(set)
+
+ # handle setup dependencies
+ for target in integration_targets:
+ for setup_target_name in target.setup_always + target.setup_once:
+ dependencies[setup_target_name].add(target.name)
+
+ # handle target dependencies
+ for target in integration_targets:
+ for need_target in target.needs_target:
+ dependencies[need_target].add(target.name)
+
+ # handle symlink dependencies between targets
+ # this use case is supported, but discouraged
+ for target in integration_targets:
+ for path in data_context().content.walk_files(target.path):
+ if not os.path.islink(to_bytes(path.rstrip(os.path.sep))):
+ continue
+
+ real_link_path = os.path.realpath(path)
+
+ if not real_link_path.startswith(real_target_root):
+ continue
+
+ link_target = real_link_path[len(real_target_root):].split('/')[0]
+
+ if link_target == target.name:
+ continue
+
+ dependencies[link_target].add(target.name)
+
+ # intentionally primitive analysis of role meta to avoid a dependency on pyyaml
+ # script based targets are scanned as they may execute a playbook with role dependencies
+ for target in integration_targets:
+ meta_dir = os.path.join(target.path, 'meta')
+
+ if not os.path.isdir(meta_dir):
+ continue
+
+ meta_paths = data_context().content.get_files(meta_dir)
+
+ for meta_path in meta_paths:
+ if os.path.exists(meta_path):
+                # try to decode the file as a utf-8 string; skip it if it contains invalid characters (binary file)
+ try:
+ meta_lines = read_text_file(meta_path).splitlines()
+ except UnicodeDecodeError:
+ continue
+
+ for meta_line in meta_lines:
+ if re.search(r'^ *#.*$', meta_line):
+ continue
+
+ if not meta_line.strip():
+ continue
+
+ for hidden_target_name in hidden_role_target_names:
+ if hidden_target_name in meta_line:
+ dependencies[hidden_target_name].add(target.name)
+
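+    # fixed-point iteration: propagate dependencies transitively until no new consumers are added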
+ while True:
+ changes = 0
+
+ for dummy, dependent_target_names in dependencies.items():
+ for dependent_target_name in list(dependent_target_names):
+ new_target_names = dependencies.get(dependent_target_name)
+
+ if new_target_names:
+ for new_target_name in new_target_names:
+ if new_target_name not in dependent_target_names:
+ dependent_target_names.add(new_target_name)
+ changes += 1
+
+ if not changes:
+ break
+
+ for target_name in sorted(dependencies):
+ consumers = dependencies[target_name]
+
+ if not consumers:
+ continue
+
+ display.info('%s:' % target_name, verbosity=4)
+
+ for consumer in sorted(consumers):
+ display.info(' %s' % consumer, verbosity=4)
+
+ return dependencies
+
+
+class CompletionTarget:
+ """Command-line argument completion target base class."""
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self):
+ self.name = None
+ self.path = None
+ self.base_path = None
+ self.modules = tuple()
+ self.aliases = tuple()
+
+ def __eq__(self, other):
+ if isinstance(other, CompletionTarget):
+ return self.__repr__() == other.__repr__()
+
+ return False
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ return self.name.__lt__(other.name)
+
+ def __gt__(self, other):
+ return self.name.__gt__(other.name)
+
+ def __hash__(self):
+ return hash(self.__repr__())
+
+ def __repr__(self):
+ if self.modules:
+ return '%s (%s)' % (self.name, ', '.join(self.modules))
+
+ return self.name
+
+
+class DirectoryTarget(CompletionTarget):
+ """Directory target."""
+ def __init__(self, path, modules):
+ """
+ :type path: str
+ :type modules: tuple[str]
+ """
+ super(DirectoryTarget, self).__init__()
+
+ self.name = path
+ self.path = path
+ self.modules = modules
+
+
+class TestTarget(CompletionTarget):
+ """Generic test target."""
+ def __init__(self, path, module_path, module_prefix, base_path, symlink=None):
+ """
+ :type path: str
+ :type module_path: str | None
+ :type module_prefix: str | None
+ :type base_path: str
+ :type symlink: bool | None
+ """
+ super(TestTarget, self).__init__()
+
+ if symlink is None:
+ symlink = os.path.islink(to_bytes(path.rstrip(os.path.sep)))
+
+ self.name = path
+ self.path = path
+ self.base_path = base_path + '/' if base_path else None
+ self.symlink = symlink
+
+ name, ext = os.path.splitext(os.path.basename(self.path))
+
+ if module_path and is_subdir(path, module_path) and name != '__init__' and ext in MODULE_EXTENSIONS:
+ self.module = name[len(module_prefix or ''):].lstrip('_')
+ self.modules = (self.module,)
+ else:
+ self.module = None
+ self.modules = tuple()
+
+ aliases = [self.path, self.module]
+ parts = self.path.split('/')
+
+ for i in range(1, len(parts)):
+ alias = '%s/' % '/'.join(parts[:i])
+ aliases.append(alias)
+
+ aliases = [a for a in aliases if a]
+
+ self.aliases = tuple(sorted(aliases))
+
+
+class IntegrationTarget(CompletionTarget):
+ """Integration test target."""
+ non_posix = frozenset((
+ 'network',
+ 'windows',
+ ))
+
+ categories = frozenset(non_posix | frozenset((
+ 'posix',
+ 'module',
+ 'needs',
+ 'skip',
+ )))
+
+ def __init__(self, path, modules, prefixes):
+ """
+ :type path: str
+ :type modules: frozenset[str]
+ :type prefixes: dict[str, str]
+ """
+ super(IntegrationTarget, self).__init__()
+
+ self.relative_path = os.path.relpath(path, data_context().content.integration_targets_path)
+ self.name = self.relative_path.replace(os.path.sep, '.')
+ self.path = path
+
+ # script_path and type
+
+ file_paths = data_context().content.get_files(path)
+ runme_path = os.path.join(path, 'runme.sh')
+
+ if runme_path in file_paths:
+ self.type = 'script'
+ self.script_path = runme_path
+ else:
+ self.type = 'role' # ansible will consider these empty roles, so ansible-test should as well
+ self.script_path = None
+
+ # static_aliases
+
+ aliases_path = os.path.join(path, 'aliases')
+
+ if aliases_path in file_paths:
+ static_aliases = tuple(read_lines_without_comments(aliases_path, remove_blank_lines=True))
+ else:
+ static_aliases = tuple()
+
+ # modules
+
+ if self.name in modules:
+ module_name = self.name
+ elif self.name.startswith('win_') and self.name[4:] in modules:
+ module_name = self.name[4:]
+ else:
+ module_name = None
+
+ self.modules = tuple(sorted(a for a in static_aliases + tuple([module_name]) if a in modules))
+
+ # groups
+
+ groups = [self.type]
+ groups += [a for a in static_aliases if a not in modules]
+ groups += ['module/%s' % m for m in self.modules]
+
+ if not self.modules:
+ groups.append('non_module')
+
+ if 'destructive' not in groups:
+ groups.append('non_destructive')
+
+ if '_' in self.name:
+ prefix = self.name[:self.name.find('_')]
+ else:
+ prefix = None
+
+ if prefix in prefixes:
+ group = prefixes[prefix]
+
+ if group != prefix:
+ group = '%s/%s' % (group, prefix)
+
+ groups.append(group)
+
+ if self.name.startswith('win_'):
+ groups.append('windows')
+
+ if self.name.startswith('connection_'):
+ groups.append('connection')
+
+ if self.name.startswith('setup_') or self.name.startswith('prepare_'):
+ groups.append('hidden')
+
+ if self.type not in ('script', 'role'):
+ groups.append('hidden')
+
+ targets_relative_path = data_context().content.integration_targets_path
+
+ # Collect skip entries before group expansion to avoid registering more specific skip entries as less specific versions.
+ self.skips = tuple(g for g in groups if g.startswith('skip/'))
+
+ # Collect file paths before group expansion to avoid including the directories.
+ # Ignore references to test targets, as those must be defined using `needs/target/*` or other target references.
+ self.needs_file = tuple(sorted(set('/'.join(g.split('/')[2:]) for g in groups if
+ g.startswith('needs/file/') and not g.startswith('needs/file/%s/' % targets_relative_path))))
+
+ # network platform
+ networks = [g.split('/')[1] for g in groups if g.startswith('network/')]
+ self.network_platform = networks[0] if networks else None
+
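+        # expand each group into its parents, e.g. 'needs/target/foo' also yields 'needs' and 'needs/target';
+        # islice captures the original length up front so newly appended parents are not re-expanded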
+ for group in itertools.islice(groups, 0, len(groups)):
+ if '/' in group:
+ parts = group.split('/')
+ for i in range(1, len(parts)):
+ groups.append('/'.join(parts[:i]))
+
+ if not any(g in self.non_posix for g in groups):
+ groups.append('posix')
+
+ # aliases
+
+ aliases = [self.name] + \
+ ['%s/' % g for g in groups] + \
+ ['%s/%s' % (g, self.name) for g in groups if g not in self.categories]
+
+ if 'hidden/' in aliases:
+ aliases = ['hidden/'] + ['hidden/%s' % a for a in aliases if not a.startswith('hidden/')]
+
+ self.aliases = tuple(sorted(set(aliases)))
+
+ # configuration
+
+ self.setup_once = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/once/'))))
+ self.setup_always = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('setup/always/'))))
+ self.needs_target = tuple(sorted(set(g.split('/')[2] for g in groups if g.startswith('needs/target/'))))
+
+
+class TargetPatternsNotMatched(ApplicationError):
+ """One or more targets were not matched when a match was required."""
+ def __init__(self, patterns):
+ """
+ :type patterns: set[str]
+ """
+ self.patterns = sorted(patterns)
+
+ if len(patterns) > 1:
+ message = 'Target patterns not matched:\n%s' % '\n'.join(self.patterns)
+ else:
+ message = 'Target pattern not matched: %s' % self.patterns[0]
+
+ super(TargetPatternsNotMatched, self).__init__(message)
diff --git a/test/lib/ansible_test/_internal/test.py b/test/lib/ansible_test/_internal/test.py
new file mode 100644
index 00000000..8d9629a9
--- /dev/null
+++ b/test/lib/ansible_test/_internal/test.py
@@ -0,0 +1,524 @@
+"""Classes for storing and processing test results."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import re
+
+from . import types as t
+
+from .util import (
+ display,
+ get_ansible_version,
+)
+
+from .util_common import (
+ write_text_test_results,
+ write_json_test_results,
+ ResultType,
+)
+
+from .config import (
+ TestConfig,
+)
+
+
+def calculate_best_confidence(choices, metadata):
+ """
+ :type choices: tuple[tuple[str, int]]
+ :type metadata: Metadata
+ :rtype: int
+ """
+ best_confidence = 0
+
+ for path, line in choices:
+ confidence = calculate_confidence(path, line, metadata)
+ best_confidence = max(confidence, best_confidence)
+
+ return best_confidence
+
+
+def calculate_confidence(path, line, metadata):
+ """
+ :type path: str
+ :type line: int
+ :type metadata: Metadata
+ :rtype: int
+ """
+ ranges = metadata.changes.get(path)
+
+ # no changes were made to the file
+ if not ranges:
+ return 0
+
+ # changes were made to the same file and line
+    if any(r[0] <= line <= r[1] for r in ranges):
+ return 100
+
+ # changes were made to the same file and the line number is unknown
+ if line == 0:
+ return 75
+
+ # changes were made to the same file and the line number is different
+ return 50
+
+
+class TestResult:
+ """Base class for test results."""
+ def __init__(self, command, test, python_version=None):
+ """
+ :type command: str
+ :type test: str
+ :type python_version: str
+ """
+ self.command = command
+ self.test = test
+ self.python_version = python_version
+ self.name = self.test or self.command
+
+ if self.python_version:
+ self.name += '-python-%s' % self.python_version
+
+ try:
+ import junit_xml
+ except ImportError:
+ junit_xml = None
+
+ self.junit = junit_xml
+
+ def write(self, args):
+ """
+ :type args: TestConfig
+ """
+ self.write_console()
+ self.write_bot(args)
+
+ if args.lint:
+ self.write_lint()
+
+ if args.junit:
+ if self.junit:
+ self.write_junit(args)
+ else:
+ display.warning('Skipping junit xml output because the `junit-xml` python package was not found.', unique=True)
+
+ def write_console(self):
+ """Write results to console."""
+
+ def write_lint(self):
+ """Write lint results to stdout."""
+
+ def write_bot(self, args):
+ """
+ :type args: TestConfig
+ """
+
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+
+ def create_result_name(self, extension):
+ """
+ :type extension: str
+ :rtype: str
+ """
+ name = 'ansible-test-%s' % self.command
+
+ if self.test:
+ name += '-%s' % self.test
+
+ if self.python_version:
+ name += '-python-%s' % self.python_version
+
+ name += extension
+
+ return name
+
+ def save_junit(self, args, test_case, properties=None):
+ """
+ :type args: TestConfig
+ :type test_case: junit_xml.TestCase
+ :type properties: dict[str, str] | None
+ :rtype: str | None
+ """
+ test_suites = [
+ self.junit.TestSuite(
+ name='ansible-test',
+ test_cases=[test_case],
+ timestamp=datetime.datetime.utcnow().replace(microsecond=0).isoformat(),
+ properties=properties,
+ ),
+ ]
+
+ # the junit_xml API is changing in version 2.0.0
+ # TestSuite.to_xml_string is being replaced with to_xml_report_string
+ # see: https://github.com/kyrus/python-junit-xml/blob/63db26da353790500642fd02cae1543eb41aab8b/junit_xml/__init__.py#L249-L261
+ try:
+ to_xml_string = self.junit.to_xml_report_string
+ except AttributeError:
+ to_xml_string = self.junit.TestSuite.to_xml_string
+
+ report = to_xml_string(test_suites=test_suites, prettyprint=True, encoding='utf-8')
+
+ if args.explain:
+ return
+
+ write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), report)
+
+
+class TestTimeout(TestResult):
+ """Test timeout."""
+ def __init__(self, timeout_duration):
+ """
+ :type timeout_duration: int
+ """
+ super(TestTimeout, self).__init__(command='timeout', test='')
+
+ self.timeout_duration = timeout_duration
+
+ def write(self, args):
+ """
+ :type args: TestConfig
+ """
+ message = 'Tests were aborted after exceeding the %d minute time limit.' % self.timeout_duration
+
+ # Include a leading newline to improve readability on Shippable "Tests" tab.
+ # Without this, the first line becomes indented.
+ output = '''
+One or more of the following situations may be responsible:
+
+- Code changes have resulted in tests that hang or run for an excessive amount of time.
+- Tests have been added which exceed the time limit when combined with existing tests.
+- Test infrastructure and/or external dependencies are operating slower than normal.'''
+
+ if args.coverage:
+ output += '\n- Additional overhead from collecting code coverage has resulted in tests exceeding the time limit.'
+
+ output += '\n\nConsult the console log for additional details on where the timeout occurred.'
+
+ timestamp = datetime.datetime.utcnow().replace(microsecond=0).isoformat()
+
+ # hack to avoid requiring junit-xml, which may not be pre-installed outside our test containers
+ xml = '''
+<?xml version="1.0" encoding="utf-8"?>
+<testsuites disabled="0" errors="1" failures="0" tests="1" time="0.0">
+\t<testsuite disabled="0" errors="1" failures="0" file="None" log="None" name="ansible-test" skipped="0" tests="1" time="0" timestamp="%s" url="None">
+\t\t<testcase classname="timeout" name="timeout">
+\t\t\t<error message="%s" type="error">%s</error>
+\t\t</testcase>
+\t</testsuite>
+</testsuites>
+''' % (timestamp, message, output)
+
+ write_text_test_results(ResultType.JUNIT, self.create_result_name('.xml'), xml.lstrip())
+
+
+class TestSuccess(TestResult):
+ """Test success."""
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+ test_case = self.junit.TestCase(classname=self.command, name=self.name)
+
+ self.save_junit(args, test_case)
+
+
+class TestSkipped(TestResult):
+ """Test skipped."""
+ def write_console(self):
+ """Write results to console."""
+ display.info('No tests applicable.', verbosity=1)
+
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+ test_case = self.junit.TestCase(classname=self.command, name=self.name)
+ test_case.add_skipped_info('No tests applicable.')
+
+ self.save_junit(args, test_case)
+
+
+class TestFailure(TestResult):
+ """Test failure."""
+ def __init__(self, command, test, python_version=None, messages=None, summary=None):
+ """
+ :type command: str
+ :type test: str
+ :type python_version: str | None
+ :type messages: list[TestMessage] | None
+ :type summary: unicode | None
+ """
+ super(TestFailure, self).__init__(command, test, python_version)
+
+ if messages:
+ messages = sorted(messages)
+ else:
+ messages = []
+
+ self.messages = messages
+ self.summary = summary
+
+ def write(self, args):
+ """
+ :type args: TestConfig
+ """
+ if args.metadata.changes:
+ self.populate_confidence(args.metadata)
+
+ super(TestFailure, self).write(args)
+
+ def write_console(self):
+ """Write results to console."""
+ if self.summary:
+ display.error(self.summary)
+ else:
+ if self.python_version:
+ specifier = ' on python %s' % self.python_version
+ else:
+ specifier = ''
+
+ display.error('Found %d %s issue(s)%s which need to be resolved:' % (len(self.messages), self.test or self.command, specifier))
+
+ for message in self.messages:
+ display.error(message.format(show_confidence=True))
+
+ doc_url = self.find_docs()
+ if doc_url:
+ display.info('See documentation for help: %s' % doc_url)
+
+ def write_lint(self):
+ """Write lint results to stdout."""
+ if self.summary:
+ command = self.format_command()
+ message = 'The test `%s` failed. See stderr output for details.' % command
+ path = ''
+ message = TestMessage(message, path)
+ print(message)
+ else:
+ for message in self.messages:
+ print(message)
+
+ def write_junit(self, args):
+ """
+ :type args: TestConfig
+ """
+ title = self.format_title()
+ output = self.format_block()
+
+ test_case = self.junit.TestCase(classname=self.command, name=self.name)
+
+ # Include a leading newline to improve readability on Shippable "Tests" tab.
+ # Without this, the first line becomes indented.
+ test_case.add_failure_info(message=title, output='\n%s' % output)
+
+ self.save_junit(args, test_case)
+
+ def write_bot(self, args):
+ """
+ :type args: TestConfig
+ """
+ docs = self.find_docs()
+ message = self.format_title(help_link=docs)
+ output = self.format_block()
+
+ if self.messages:
+ verified = all((m.confidence or 0) >= 50 for m in self.messages)
+ else:
+ verified = False
+
+ bot_data = dict(
+ verified=verified,
+ docs=docs,
+ results=[
+ dict(
+ message=message,
+ output=output,
+ ),
+ ],
+ )
+
+ if args.explain:
+ return
+
+ write_json_test_results(ResultType.BOT, self.create_result_name('.json'), bot_data)
+
+ def populate_confidence(self, metadata):
+ """
+ :type metadata: Metadata
+ """
+ for message in self.messages:
+ if message.confidence is None:
+ message.confidence = calculate_confidence(message.path, message.line, metadata)
+
+ def format_command(self):
+ """
+ :rtype: str
+ """
+ command = 'ansible-test %s' % self.command
+
+ if self.test:
+ command += ' --test %s' % self.test
+
+ if self.python_version:
+ command += ' --python %s' % self.python_version
+
+ return command
+
+ def find_docs(self):
+ """
+        :rtype: str | None
+ """
+ if self.command != 'sanity':
+ return None # only sanity tests have docs links
+
+        # Use the major.minor version for the URL only if this is a release that
+        # matches the pattern 2.4.0, otherwise use 'devel'
+ ansible_version = get_ansible_version()
+ url_version = 'devel'
+ if re.search(r'^[0-9.]+$', ansible_version):
+ url_version = '.'.join(ansible_version.split('.')[:2])
+
+ testing_docs_url = 'https://docs.ansible.com/ansible/%s/dev_guide/testing' % url_version
+
+ url = '%s/%s/' % (testing_docs_url, self.command)
+
+ if self.test:
+ url += '%s.html' % self.test
+
+ return url
+
+ def format_title(self, help_link=None):
+ """
+ :type help_link: str | None
+ :rtype: str
+ """
+ command = self.format_command()
+
+ if self.summary:
+ reason = 'the error'
+ else:
+ reason = '1 error' if len(self.messages) == 1 else '%d errors' % len(self.messages)
+
+ if help_link:
+ help_link_markup = ' [[explain](%s)]' % help_link
+ else:
+ help_link_markup = ''
+
+ title = 'The test `%s`%s failed with %s:' % (command, help_link_markup, reason)
+
+ return title
+
+ def format_block(self):
+ """
+ :rtype: str
+ """
+ if self.summary:
+ block = self.summary
+ else:
+ block = '\n'.join(m.format() for m in self.messages)
+
+ message = block.strip()
+
+ # Hack to remove ANSI color reset code from SubprocessError messages.
+ message = message.replace(display.clear, '')
+
+ return message
+
+
+class TestMessage:
+ """Single test message for one file."""
+ def __init__(self, message, path, line=0, column=0, level='error', code=None, confidence=None):
+ """
+ :type message: str
+ :type path: str
+ :type line: int
+ :type column: int
+ :type level: str
+ :type code: str | None
+ :type confidence: int | None
+ """
+ self.__path = path
+ self.__line = line
+ self.__column = column
+ self.__level = level
+ self.__code = code
+ self.__message = message
+
+ self.confidence = confidence
+
+ @property
+ def path(self): # type: () -> str
+ """Return the path."""
+ return self.__path
+
+ @property
+ def line(self): # type: () -> int
+ """Return the line number, or 0 if none is available."""
+ return self.__line
+
+ @property
+ def column(self): # type: () -> int
+ """Return the column number, or 0 if none is available."""
+ return self.__column
+
+ @property
+ def level(self): # type: () -> str
+ """Return the level."""
+ return self.__level
+
+ @property
+ def code(self): # type: () -> t.Optional[str]
+ """Return the code, if any."""
+ return self.__code
+
+ @property
+ def message(self): # type: () -> str
+ """Return the message."""
+ return self.__message
+
+ @property
+ def tuple(self): # type: () -> t.Tuple[str, int, int, str, t.Optional[str], str]
+ """Return a tuple with all the immutable values of this test message."""
+ return self.__path, self.__line, self.__column, self.__level, self.__code, self.__message
+
+ def __lt__(self, other):
+ return self.tuple < other.tuple
+
+ def __le__(self, other):
+ return self.tuple <= other.tuple
+
+ def __eq__(self, other):
+ return self.tuple == other.tuple
+
+ def __ne__(self, other):
+ return self.tuple != other.tuple
+
+ def __gt__(self, other):
+ return self.tuple > other.tuple
+
+ def __ge__(self, other):
+ return self.tuple >= other.tuple
+
+ def __hash__(self):
+ return hash(self.tuple)
+
+ def __str__(self):
+ return self.format()
+
+ def format(self, show_confidence=False):
+ """
+ :type show_confidence: bool
+ :rtype: str
+ """
+ if self.__code:
+ msg = '%s: %s' % (self.__code, self.__message)
+ else:
+ msg = self.__message
+
+ if show_confidence and self.confidence is not None:
+ msg += ' (%d%%)' % self.confidence
+
+ return '%s:%s:%s: %s' % (self.__path, self.__line, self.__column, msg)
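+
+
+# Example (illustrative; the path and code below are hypothetical): formatting
+# TestMessage('example issue', 'plugins/modules/foo.py', line=10, column=5, code='X100')
+# produces 'plugins/modules/foo.py:10:5: X100: example issue'.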
diff --git a/test/lib/ansible_test/_internal/thread.py b/test/lib/ansible_test/_internal/thread.py
new file mode 100644
index 00000000..49fbc1ba
--- /dev/null
+++ b/test/lib/ansible_test/_internal/thread.py
@@ -0,0 +1,57 @@
+"""Python threading tools."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import threading
+import sys
+
+try:
+ # noinspection PyPep8Naming
+ import Queue as queue
+except ImportError:
+ # noinspection PyUnresolvedReferences
+ import queue # pylint: disable=locally-disabled, import-error
+
+
+class WrappedThread(threading.Thread):
+ """Wrapper around Thread which captures results and exceptions."""
+ def __init__(self, action):
+ """
+ :type action: () -> any
+ """
+ # noinspection PyOldStyleClasses
+ super(WrappedThread, self).__init__()
+ self._result = queue.Queue()
+ self.action = action
+ self.result = None
+
+ def run(self):
+ """
+ Run action and capture results or exception.
+ Do not override. Do not call directly. Executed by the start() method.
+ """
+        # We truly want to catch anything that the worker thread might do, including calls to sys.exit().
+ # Therefore we catch *everything* (including old-style class exceptions)
+ # noinspection PyBroadException, PyPep8
+ try:
+ self._result.put((self.action(), None))
+ # pylint: disable=locally-disabled, bare-except
+ except: # noqa
+ self._result.put((None, sys.exc_info()))
+
+ def wait_for_result(self):
+ """
+ Wait for thread to exit and return the result or raise an exception.
+ :rtype: any
+ """
+ result, exception = self._result.get()
+
+ if exception:
+ if sys.version_info[0] > 2:
+ raise exception[1].with_traceback(exception[2])
+ # noinspection PyRedundantParentheses
+ exec('raise exception[0], exception[1], exception[2]') # pylint: disable=locally-disabled, exec-used
+
+ self.result = result
+
+ return result
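+
+
+# Minimal usage sketch (illustrative only; 'expensive_call' is hypothetical):
+#
+#   thread = WrappedThread(lambda: expensive_call())
+#   thread.start()
+#   result = thread.wait_for_result()  # blocks, then returns the value or re-raises the captured exception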
diff --git a/test/lib/ansible_test/_internal/types.py b/test/lib/ansible_test/_internal/types.py
new file mode 100644
index 00000000..46ef7066
--- /dev/null
+++ b/test/lib/ansible_test/_internal/types.py
@@ -0,0 +1,32 @@
+"""Import wrapper for type hints when available."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+TYPE_CHECKING = False
+
+try:
+ from typing import (
+ Any,
+ AnyStr,
+ BinaryIO,
+ Callable,
+ Dict,
+ FrozenSet,
+ Generator,
+ IO,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Pattern,
+ Set,
+ Text,
+ TextIO,
+ Tuple,
+ Type,
+ TYPE_CHECKING,
+ TypeVar,
+ Union,
+ )
+except ImportError:
+ pass
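+
+
+# Typical usage sketch (illustrative): callers import this module as 't' and use
+# comment-style annotations, which keeps the code importable on interpreters
+# without the 'typing' module:
+#
+#   from . import types as t
+#
+#   def example(value):  # type: (str) -> t.Optional[str]
+#       return value or None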
diff --git a/test/lib/ansible_test/_internal/units/__init__.py b/test/lib/ansible_test/_internal/units/__init__.py
new file mode 100644
index 00000000..22145431
--- /dev/null
+++ b/test/lib/ansible_test/_internal/units/__init__.py
@@ -0,0 +1,159 @@
+"""Execute unit tests using pytest."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from ..util import (
+ ANSIBLE_TEST_DATA_ROOT,
+ display,
+ get_available_python_versions,
+ is_subdir,
+ SubprocessError,
+ REMOTE_ONLY_PYTHON_VERSIONS,
+)
+
+from ..util_common import (
+ intercept_command,
+ ResultType,
+ handle_layout_messages,
+)
+
+from ..ansible_util import (
+ ansible_environment,
+ check_pyyaml,
+)
+
+from ..target import (
+ walk_internal_targets,
+ walk_units_targets,
+)
+
+from ..config import (
+ UnitsConfig,
+)
+
+from ..coverage_util import (
+ coverage_context,
+)
+
+from ..data import (
+ data_context,
+)
+
+from ..executor import (
+ AllTargetsSkipped,
+ Delegate,
+ get_changes_filter,
+ install_command_requirements,
+ SUPPORTED_PYTHON_VERSIONS,
+)
+
+
+def command_units(args):
+ """
+ :type args: UnitsConfig
+ """
+ handle_layout_messages(data_context().content.unit_messages)
+
+ changes = get_changes_filter(args)
+ require = args.require + changes
+ include = walk_internal_targets(walk_units_targets(), args.include, args.exclude, require)
+
+ paths = [target.path for target in include]
+ remote_paths = [path for path in paths
+ if is_subdir(path, data_context().content.unit_module_path)
+ or is_subdir(path, data_context().content.unit_module_utils_path)]
+
+ if not paths:
+ raise AllTargetsSkipped()
+
+ if args.python and args.python in REMOTE_ONLY_PYTHON_VERSIONS and not remote_paths:
+ raise AllTargetsSkipped()
+
+ if args.delegate:
+ raise Delegate(require=changes, exclude=args.exclude)
+
+ version_commands = []
+
+ available_versions = sorted(get_available_python_versions(list(SUPPORTED_PYTHON_VERSIONS)).keys())
+
+ for version in SUPPORTED_PYTHON_VERSIONS:
+        # run all versions unless a specific version is given, in which case run only that version
+ if args.python and version != args.python_version:
+ continue
+
+ if not args.python and version not in available_versions:
+ display.warning("Skipping unit tests on Python %s due to missing interpreter." % version)
+ continue
+
+ if args.requirements_mode != 'skip':
+ install_command_requirements(args, version)
+
+ env = ansible_environment(args)
+
+ cmd = [
+ 'pytest',
+ '--boxed',
+ '-r', 'a',
+ '-n', str(args.num_workers) if args.num_workers else 'auto',
+ '--color',
+ 'yes' if args.color else 'no',
+ '-p', 'no:cacheprovider',
+ '-c', os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest.ini'),
+ '--junit-xml', os.path.join(ResultType.JUNIT.path, 'python%s-units.xml' % version),
+ ]
+
+ if not data_context().content.collection:
+ cmd.append('--durations=25')
+
+ if version != '2.6':
+ # added in pytest 4.5.0, which requires python 2.7+
+ cmd.append('--strict-markers')
+
+ plugins = []
+
+ if args.coverage:
+ plugins.append('ansible_pytest_coverage')
+
+ if data_context().content.collection:
+ plugins.append('ansible_pytest_collections')
+
+ if plugins:
+ env['PYTHONPATH'] += ':%s' % os.path.join(ANSIBLE_TEST_DATA_ROOT, 'pytest/plugins')
+ env['PYTEST_PLUGINS'] = ','.join(plugins)
+
+ if args.collect_only:
+ cmd.append('--collect-only')
+
+ if args.verbosity:
+ cmd.append('-' + ('v' * args.verbosity))
+
+ if version in REMOTE_ONLY_PYTHON_VERSIONS:
+ test_paths = remote_paths
+ else:
+ test_paths = paths
+
+ if not test_paths:
+ continue
+
+ cmd.extend(test_paths)
+
+ version_commands.append((version, cmd, env))
+
+ if args.requirements_mode == 'only':
+ sys.exit()
+
+ for version, command, env in version_commands:
+ check_pyyaml(args, version)
+
+ display.info('Unit test with Python %s' % version)
+
+ try:
+ with coverage_context(args):
+ intercept_command(args, command, target_name='units', env=env, python_version=version)
+ except SubprocessError as ex:
+ # pytest exits with status code 5 when all tests are skipped, which isn't an error for our use case
+ if ex.status != 5:
+ raise
diff --git a/test/lib/ansible_test/_internal/util.py b/test/lib/ansible_test/_internal/util.py
new file mode 100644
index 00000000..005c3e05
--- /dev/null
+++ b/test/lib/ansible_test/_internal/util.py
@@ -0,0 +1,853 @@
+"""Miscellaneous utility functions and classes."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import contextlib
+import errno
+import fcntl
+import inspect
+import os
+import pkgutil
+import random
+import re
+import shutil
+import socket
+import stat
+import string
+import subprocess
+import sys
+import tempfile
+import time
+import zipfile
+
+from struct import unpack, pack
+from termios import TIOCGWINSZ
+
+try:
+ from abc import ABC
+except ImportError:
+ from abc import ABCMeta
+ ABC = ABCMeta('ABC', (), {})
+
+try:
+ # noinspection PyCompatibility
+ from configparser import ConfigParser
+except ImportError:
+ # noinspection PyCompatibility,PyUnresolvedReferences
+ from ConfigParser import SafeConfigParser as ConfigParser
+
+try:
+ # noinspection PyProtectedMember
+ from shlex import quote as cmd_quote
+except ImportError:
+ # noinspection PyProtectedMember
+ from pipes import quote as cmd_quote
+
+from . import types as t
+
+from .encoding import (
+ to_bytes,
+ to_optional_bytes,
+ to_optional_text,
+)
+
+from .io import (
+ open_binary_file,
+ read_text_file,
+)
+
+try:
+ C = t.TypeVar('C')
+except AttributeError:
+ C = None
+
+
+PYTHON_PATHS = {} # type: t.Dict[str, str]
+
+try:
+ # noinspection PyUnresolvedReferences
+ MAXFD = subprocess.MAXFD
+except AttributeError:
+ MAXFD = -1
+
+COVERAGE_CONFIG_NAME = 'coveragerc'
+
+ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+# assume running from install
+ANSIBLE_ROOT = os.path.dirname(ANSIBLE_TEST_ROOT)
+ANSIBLE_BIN_PATH = os.path.dirname(os.path.abspath(sys.argv[0]))
+ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'ansible')
+ANSIBLE_SOURCE_ROOT = None
+
+if not os.path.exists(ANSIBLE_LIB_ROOT):
+ # running from source
+ ANSIBLE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(ANSIBLE_TEST_ROOT)))
+ ANSIBLE_BIN_PATH = os.path.join(ANSIBLE_ROOT, 'bin')
+ ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'lib', 'ansible')
+ ANSIBLE_SOURCE_ROOT = ANSIBLE_ROOT
+
+ANSIBLE_TEST_DATA_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_data')
+ANSIBLE_TEST_CONFIG_ROOT = os.path.join(ANSIBLE_TEST_ROOT, 'config')
+
+# Modes are set to allow all users the same level of access.
+# This permits files to be used in tests that change users.
+# The only exception is write access to directories for the user creating them.
+# This avoids having to modify the directory permissions a second time.
+
+MODE_READ = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
+
+MODE_FILE = MODE_READ
+MODE_FILE_EXECUTE = MODE_FILE | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+MODE_FILE_WRITE = MODE_FILE | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
+
+MODE_DIRECTORY = MODE_READ | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+MODE_DIRECTORY_WRITE = MODE_DIRECTORY | stat.S_IWGRP | stat.S_IWOTH
+
+REMOTE_ONLY_PYTHON_VERSIONS = (
+ '2.6',
+)
+
+SUPPORTED_PYTHON_VERSIONS = (
+ '2.6',
+ '2.7',
+ '3.5',
+ '3.6',
+ '3.7',
+ '3.8',
+ '3.9',
+)
+
+
+def remove_file(path):
+ """
+ :type path: str
+ """
+ if os.path.isfile(path):
+ os.remove(path)
+
+
+def read_lines_without_comments(path, remove_blank_lines=False, optional=False): # type: (str, bool, bool) -> t.List[str]
+ """
+ Returns lines from the specified text file with comments removed.
+ Comments are any content from a hash symbol to the end of a line.
+ Any spaces immediately before a comment are also removed.
+ """
+ if optional and not os.path.exists(path):
+ return []
+
+ lines = read_text_file(path).splitlines()
+
+ lines = [re.sub(r' *#.*$', '', line) for line in lines]
+
+ if remove_blank_lines:
+ lines = [line for line in lines if line]
+
+ return lines
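+
+
+# Example (illustrative): a file containing the line 'centos8  # deprecated' yields
+# ['centos8'], since the substitution strips the comment and the spaces before it.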
+
+
+def find_executable(executable, cwd=None, path=None, required=True):
+ """
+ :type executable: str
+ :type cwd: str
+ :type path: str
+ :type required: bool | str
+ :rtype: str | None
+ """
+ match = None
+ real_cwd = os.getcwd()
+
+ if not cwd:
+ cwd = real_cwd
+
+ if os.path.dirname(executable):
+ target = os.path.join(cwd, executable)
+ if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
+ match = executable
+ else:
+ if path is None:
+ path = os.environ.get('PATH', os.path.defpath)
+
+ if path:
+ path_dirs = path.split(os.path.pathsep)
+ seen_dirs = set()
+
+ for path_dir in path_dirs:
+ if path_dir in seen_dirs:
+ continue
+
+ seen_dirs.add(path_dir)
+
+ if os.path.abspath(path_dir) == real_cwd:
+ path_dir = cwd
+
+ candidate = os.path.join(path_dir, executable)
+
+ if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
+ match = candidate
+ break
+
+ if not match and required:
+ message = 'Required program "%s" not found.' % executable
+
+ if required != 'warning':
+ raise ApplicationError(message)
+
+ display.warning(message)
+
+ return match
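+
+
+# Examples (illustrative): find_executable('git') searches each directory on PATH,
+# while find_executable('bin/git') is resolved relative to cwd because the name
+# contains a directory component.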
+
+
+def find_python(version, path=None, required=True):
+ """
+ :type version: str
+ :type path: str | None
+ :type required: bool
+ :rtype: str
+ """
+ version_info = tuple(int(n) for n in version.split('.'))
+
+ if not path and version_info == sys.version_info[:len(version_info)]:
+ python_bin = sys.executable
+ else:
+ python_bin = find_executable('python%s' % version, path=path, required=required)
+
+ return python_bin
+
+
+def get_ansible_version(): # type: () -> str
+ """Return the Ansible version."""
+ try:
+ return get_ansible_version.version
+ except AttributeError:
+ pass
+
+ # ansible may not be in our sys.path
+    # avoids a symlink to release.py since the placement of ansible relative to ansible-test may change during delegation
+ load_module(os.path.join(ANSIBLE_LIB_ROOT, 'release.py'), 'ansible_release')
+
+ # noinspection PyUnresolvedReferences
+ from ansible_release import __version__ as ansible_version # pylint: disable=import-error
+
+ get_ansible_version.version = ansible_version
+
+ return ansible_version
+
+
+def get_available_python_versions(versions): # type: (t.List[str]) -> t.Dict[str, str]
+ """Return a dictionary indicating which of the requested Python versions are available."""
+ try:
+ return get_available_python_versions.result
+ except AttributeError:
+ pass
+
+ get_available_python_versions.result = dict((version, path) for version, path in
+ ((version, find_python(version, required=False)) for version in versions) if path)
+
+ return get_available_python_versions.result
+
+
+def generate_pip_command(python):
+ """
+ :type python: str
+ :rtype: list[str]
+ """
+ return [python, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'quiet_pip.py')]
+
+
+def raw_command(cmd, capture=False, env=None, data=None, cwd=None, explain=False, stdin=None, stdout=None,
+ cmd_verbosity=1, str_errors='strict'):
+ """
+ :type cmd: collections.Iterable[str]
+ :type capture: bool
+ :type env: dict[str, str] | None
+ :type data: str | None
+ :type cwd: str | None
+ :type explain: bool
+ :type stdin: file | None
+ :type stdout: file | None
+ :type cmd_verbosity: int
+ :type str_errors: str
+ :rtype: str | None, str | None
+ """
+ if not cwd:
+ cwd = os.getcwd()
+
+ if not env:
+ env = common_environment()
+
+ cmd = list(cmd)
+
+ escaped_cmd = ' '.join(cmd_quote(c) for c in cmd)
+
+ display.info('Run command: %s' % escaped_cmd, verbosity=cmd_verbosity, truncate=True)
+ display.info('Working directory: %s' % cwd, verbosity=2)
+
+ program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning')
+
+ if program:
+ display.info('Program found: %s' % program, verbosity=2)
+
+ for key in sorted(env.keys()):
+ display.info('%s=%s' % (key, env[key]), verbosity=2)
+
+ if explain:
+ return None, None
+
+ communicate = False
+
+ if stdin is not None:
+ data = None
+ communicate = True
+ elif data is not None:
+ stdin = subprocess.PIPE
+ communicate = True
+
+ if stdout:
+ communicate = True
+
+ if capture:
+ stdout = stdout or subprocess.PIPE
+ stderr = subprocess.PIPE
+ communicate = True
+ else:
+ stderr = None
+
+ start = time.time()
+ process = None
+
+ try:
+ try:
+ cmd_bytes = [to_bytes(c) for c in cmd]
+ env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items())
+ process = subprocess.Popen(cmd_bytes, env=env_bytes, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd)
+ except OSError as ex:
+ if ex.errno == errno.ENOENT:
+ raise ApplicationError('Required program "%s" not found.' % cmd[0])
+ raise
+
+ if communicate:
+ data_bytes = to_optional_bytes(data)
+ stdout_bytes, stderr_bytes = process.communicate(data_bytes)
+ stdout_text = to_optional_text(stdout_bytes, str_errors) or u''
+ stderr_text = to_optional_text(stderr_bytes, str_errors) or u''
+ else:
+ process.wait()
+ stdout_text, stderr_text = None, None
+ finally:
+ if process and process.returncode is None:
+ process.kill()
+ display.info('') # the process we're interrupting may have completed a partial line of output
+ display.notice('Killed command to avoid an orphaned child process during handling of an unexpected exception.')
+
+ status = process.returncode
+ runtime = time.time() - start
+
+ display.info('Command exited with status %s after %s seconds.' % (status, runtime), verbosity=4)
+
+ if status == 0:
+ return stdout_text, stderr_text
+
+ raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime)
+
+
+def common_environment():
+ """Common environment used for executing all programs."""
+ env = dict(
+ LC_ALL='en_US.UTF-8',
+ PATH=os.environ.get('PATH', os.path.defpath),
+ )
+
+ required = (
+ 'HOME',
+ )
+
+ optional = (
+ 'HTTPTESTER',
+ 'LD_LIBRARY_PATH',
+ 'SSH_AUTH_SOCK',
+        # macOS High Sierra Compatibility
+ # http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html
+ # Example configuration for macOS:
+ # export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
+ 'OBJC_DISABLE_INITIALIZE_FORK_SAFETY',
+ 'ANSIBLE_KEEP_REMOTE_FILES',
+        # macOS Homebrew Compatibility
+ # https://cryptography.io/en/latest/installation/#building-cryptography-on-macos
+ # This may also be required to install pyyaml with libyaml support when installed in non-standard locations.
+ # Example configuration for brew on macOS:
+ # export LDFLAGS="-L$(brew --prefix openssl)/lib/ -L$(brew --prefix libyaml)/lib/"
+ # export CFLAGS="-I$(brew --prefix openssl)/include/ -I$(brew --prefix libyaml)/include/"
+ # However, this is not adequate for PyYAML 3.13, which is the latest version supported on Python 2.6.
+ # For that version the standard location must be used, or `pip install` must be invoked with additional options:
+ # --global-option=build_ext --global-option=-L{path_to_lib_dir}
+ 'LDFLAGS',
+ 'CFLAGS',
+ )
+
+ env.update(pass_vars(required=required, optional=optional))
+
+ return env
+
+
+def pass_vars(required, optional):
+ """
+ :type required: collections.Iterable[str]
+ :type optional: collections.Iterable[str]
+ :rtype: dict[str, str]
+ """
+ env = {}
+
+ for name in required:
+ if name not in os.environ:
+ raise MissingEnvironmentVariable(name)
+ env[name] = os.environ[name]
+
+ for name in optional:
+ if name not in os.environ:
+ continue
+ env[name] = os.environ[name]
+
+ return env
+
+
+def deepest_path(path_a, path_b):
+ """Return the deepest of two paths, or None if the paths are unrelated.
+ :type path_a: str
+ :type path_b: str
+ :rtype: str | None
+ """
+ if path_a == '.':
+ path_a = ''
+
+ if path_b == '.':
+ path_b = ''
+
+ if path_a.startswith(path_b):
+ return path_a or '.'
+
+ if path_b.startswith(path_a):
+ return path_b or '.'
+
+ return None
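+
+
+# Examples (illustrative): deepest_path('a/b/c', 'a/b') returns 'a/b/c', while
+# deepest_path('a/b', 'x/y') returns None because neither path contains the other.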
+
+
+def remove_tree(path):
+ """
+ :type path: str
+ """
+ try:
+ shutil.rmtree(to_bytes(path))
+ except OSError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+
+def is_binary_file(path):
+ """
+ :type path: str
+ :rtype: bool
+ """
+ assume_text = set([
+ '.cfg',
+ '.conf',
+ '.crt',
+ '.cs',
+ '.css',
+ '.html',
+ '.ini',
+ '.j2',
+ '.js',
+ '.json',
+ '.md',
+ '.pem',
+ '.ps1',
+ '.psm1',
+ '.py',
+ '.rst',
+ '.sh',
+ '.txt',
+ '.xml',
+ '.yaml',
+ '.yml',
+ ])
+
+ assume_binary = set([
+ '.bin',
+ '.eot',
+ '.gz',
+ '.ico',
+ '.iso',
+ '.jpg',
+ '.otf',
+ '.p12',
+ '.png',
+ '.pyc',
+ '.rpm',
+ '.ttf',
+ '.woff',
+ '.woff2',
+ '.zip',
+ ])
+
+ ext = os.path.splitext(path)[1]
+
+ if ext in assume_text:
+ return False
+
+ if ext in assume_binary:
+ return True
+
+ with open_binary_file(path) as path_fd:
+ # noinspection PyTypeChecker
+ return b'\0' in path_fd.read(4096)
+
+
+def generate_password():
+ """Generate a random password.
+ :rtype: str
+ """
+ chars = [
+ string.ascii_letters,
+ string.digits,
+ string.ascii_letters,
+ string.digits,
+ '-',
+ ] * 4
+
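+    # chars[:-1] drops the final '-' from the 20 entries above, producing a
+    # 19 character password that never ends with a dash.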
+ password = ''.join([random.choice(char) for char in chars[:-1]])
+
+ display.sensitive.add(password)
+
+ return password
+
+
+class Display:
+ """Manages color console output."""
+ clear = '\033[0m'
+ red = '\033[31m'
+ green = '\033[32m'
+ yellow = '\033[33m'
+ blue = '\033[34m'
+ purple = '\033[35m'
+ cyan = '\033[36m'
+
+ verbosity_colors = {
+ 0: None,
+ 1: green,
+ 2: blue,
+ 3: cyan,
+ }
+
+ def __init__(self):
+ self.verbosity = 0
+ self.color = sys.stdout.isatty()
+ self.warnings = []
+ self.warnings_unique = set()
+ self.info_stderr = False
+ self.rows = 0
+ self.columns = 0
+ self.truncate = 0
+ self.redact = True
+ self.sensitive = set()
+
+ if os.isatty(0):
+ self.rows, self.columns = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[:2]
+
+ def __warning(self, message):
+ """
+ :type message: str
+ """
+ self.print_message('WARNING: %s' % message, color=self.purple, fd=sys.stderr)
+
+ def review_warnings(self):
+ """Review all warnings which previously occurred."""
+ if not self.warnings:
+ return
+
+ self.__warning('Reviewing previous %d warning(s):' % len(self.warnings))
+
+ for warning in self.warnings:
+ self.__warning(warning)
+
+ def warning(self, message, unique=False, verbosity=0):
+ """
+ :type message: str
+ :type unique: bool
+ :type verbosity: int
+ """
+ if verbosity > self.verbosity:
+ return
+
+ if unique:
+ if message in self.warnings_unique:
+ return
+
+ self.warnings_unique.add(message)
+
+ self.__warning(message)
+ self.warnings.append(message)
+
+ def notice(self, message):
+ """
+ :type message: str
+ """
+ self.print_message('NOTICE: %s' % message, color=self.purple, fd=sys.stderr)
+
+ def error(self, message):
+ """
+ :type message: str
+ """
+ self.print_message('ERROR: %s' % message, color=self.red, fd=sys.stderr)
+
+ def info(self, message, verbosity=0, truncate=False):
+ """
+ :type message: str
+ :type verbosity: int
+ :type truncate: bool
+ """
+ if self.verbosity >= verbosity:
+ color = self.verbosity_colors.get(verbosity, self.yellow)
+ self.print_message(message, color=color, fd=sys.stderr if self.info_stderr else sys.stdout, truncate=truncate)
+
+ def print_message(self, message, color=None, fd=sys.stdout, truncate=False): # pylint: disable=locally-disabled, invalid-name
+ """
+ :type message: str
+ :type color: str | None
+ :type fd: file
+ :type truncate: bool
+ """
+ if self.redact and self.sensitive:
+ for item in self.sensitive:
+ if not item:
+ continue
+
+ message = message.replace(item, '*' * len(item))
+
+ if truncate:
+ if len(message) > self.truncate > 5:
+ message = message[:self.truncate - 5] + ' ...'
+
+ if color and self.color:
+ # convert color resets in message to desired color
+ message = message.replace(self.clear, color)
+ message = '%s%s%s' % (color, message, self.clear)
+
+ if sys.version_info[0] == 2:
+ message = to_bytes(message)
+
+ print(message, file=fd)
+ fd.flush()
+
+
+class ApplicationError(Exception):
+ """General application error."""
+
+
+class ApplicationWarning(Exception):
+ """General application warning which interrupts normal program flow."""
+
+
+class SubprocessError(ApplicationError):
+ """Error resulting from failed subprocess execution."""
+ def __init__(self, cmd, status=0, stdout=None, stderr=None, runtime=None):
+ """
+ :type cmd: list[str]
+ :type status: int
+ :type stdout: str | None
+ :type stderr: str | None
+ :type runtime: float | None
+ """
+ message = 'Command "%s" returned exit status %s.\n' % (' '.join(cmd_quote(c) for c in cmd), status)
+
+ if stderr:
+ message += '>>> Standard Error\n'
+ message += '%s%s\n' % (stderr.strip(), Display.clear)
+
+ if stdout:
+ message += '>>> Standard Output\n'
+ message += '%s%s\n' % (stdout.strip(), Display.clear)
+
+ message = message.strip()
+
+ super(SubprocessError, self).__init__(message)
+
+ self.cmd = cmd
+ self.message = message
+ self.status = status
+ self.stdout = stdout
+ self.stderr = stderr
+ self.runtime = runtime
+
+
+class MissingEnvironmentVariable(ApplicationError):
+ """Error caused by missing environment variable."""
+ def __init__(self, name):
+ """
+ :type name: str
+ """
+ super(MissingEnvironmentVariable, self).__init__('Missing environment variable: %s' % name)
+
+ self.name = name
+
+
+def parse_to_list_of_dict(pattern, value):
+ """
+ :type pattern: str
+ :type value: str
+    :rtype: list[dict[str, str]]
+ """
+ matched = []
+ unmatched = []
+
+ for line in value.splitlines():
+ match = re.search(pattern, line)
+
+ if match:
+ matched.append(match.groupdict())
+ else:
+ unmatched.append(line)
+
+ if unmatched:
+ raise Exception('Pattern "%s" did not match values:\n%s' % (pattern, '\n'.join(unmatched)))
+
+ return matched
+
+
+def get_available_port():
+ """
+ :rtype: int
+ """
+ # this relies on the kernel not reusing previously assigned ports immediately
+ socket_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+ with contextlib.closing(socket_fd):
+ socket_fd.bind(('', 0))
+ return socket_fd.getsockname()[1]
+
+
+def get_subclasses(class_type): # type: (t.Type[C]) -> t.Set[t.Type[C]]
+ """Returns the set of types that are concrete subclasses of the given type."""
+ subclasses = set() # type: t.Set[t.Type[C]]
+ queue = [class_type] # type: t.List[t.Type[C]]
+
+ while queue:
+ parent = queue.pop()
+
+ for child in parent.__subclasses__():
+ if child not in subclasses:
+ if not inspect.isabstract(child):
+ subclasses.add(child)
+ queue.append(child)
+
+ return subclasses
+
+
+def is_subdir(candidate_path, path): # type: (str, str) -> bool
+ """Returns true if candidate_path is path or a subdirectory of path."""
+ if not path.endswith(os.path.sep):
+ path += os.path.sep
+
+ if not candidate_path.endswith(os.path.sep):
+ candidate_path += os.path.sep
+
+ return candidate_path.startswith(path)
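+
+
+# Examples (illustrative): is_subdir('a/b/c', 'a/b') and is_subdir('a/b', 'a/b') are
+# True, while is_subdir('a/bc', 'a/b') is False since the comparison is separator-aware.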
+
+
+def paths_to_dirs(paths): # type: (t.List[str]) -> t.List[str]
+ """Returns a list of directories extracted from the given list of paths."""
+ dir_names = set()
+
+ for path in paths:
+ while True:
+ path = os.path.dirname(path)
+
+ if not path or path == os.path.sep:
+ break
+
+ dir_names.add(path + os.path.sep)
+
+ return sorted(dir_names)
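+
+
+# Example (illustrative): paths_to_dirs(['a/b/c.py']) returns ['a/', 'a/b/'].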
+
+
+def str_to_version(version): # type: (str) -> t.Tuple[int, ...]
+ """Return a version tuple from a version string."""
+ return tuple(int(n) for n in version.split('.'))
+
+
+def version_to_str(version): # type: (t.Tuple[int, ...]) -> str
+ """Return a version string from a version tuple."""
+ return '.'.join(str(n) for n in version)
+
+
+def import_plugins(directory, root=None): # type: (str, t.Optional[str]) -> None
+ """
+ Import plugins from the given directory relative to the given root.
+ If the root is not provided, the 'lib' directory for the test runner will be used.
+ """
+ if root is None:
+ root = os.path.dirname(__file__)
+
+ path = os.path.join(root, directory)
+ package = __name__.rsplit('.', 1)[0]
+ prefix = '%s.%s.' % (package, directory.replace(os.path.sep, '.'))
+
+ for (_module_loader, name, _ispkg) in pkgutil.iter_modules([path], prefix=prefix):
+ module_path = os.path.join(root, name[len(package) + 1:].replace('.', os.path.sep) + '.py')
+ load_module(module_path, name)
+
+
+def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None
+ """
+ Load plugins of the specified type and track them in the specified database.
+ Only plugins which have already been imported will be loaded.
+ """
+ plugins = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) # type: t.Dict[str, t.Type[C]]
+
+ for plugin in plugins:
+ database[plugin] = plugins[plugin]
+
+
+def load_module(path, name): # type: (str, str) -> None
+ """Load a Python module using the given name and path."""
+ if name in sys.modules:
+ return
+
+ if sys.version_info >= (3, 4):
+ # noinspection PyUnresolvedReferences
+ import importlib.util
+
+ # noinspection PyUnresolvedReferences
+ spec = importlib.util.spec_from_file_location(name, path)
+ # noinspection PyUnresolvedReferences
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
+
+ sys.modules[name] = module
+ else:
+ # noinspection PyDeprecation
+ import imp
+
+ # load_source (and thus load_module) require a file opened with `open` in text mode
+ with open(to_bytes(path)) as module_file:
+ # noinspection PyDeprecation
+ imp.load_module(name, module_file, path, ('.py', 'r', imp.PY_SOURCE))
+
+
+@contextlib.contextmanager
+def tempdir(): # type: () -> str
+ """Creates a temporary directory that is deleted outside the context scope."""
+ temp_path = tempfile.mkdtemp()
+ yield temp_path
+ shutil.rmtree(temp_path)
+
+
+@contextlib.contextmanager
+def open_zipfile(path, mode='r'):
+ """Opens a zip file and closes the file automatically."""
+ zib_obj = zipfile.ZipFile(path, mode=mode)
+ yield zib_obj
+ zib_obj.close()
+
+
+display = Display() # pylint: disable=locally-disabled, invalid-name
diff --git a/test/lib/ansible_test/_internal/util_common.py b/test/lib/ansible_test/_internal/util_common.py
new file mode 100644
index 00000000..1ac2e60d
--- /dev/null
+++ b/test/lib/ansible_test/_internal/util_common.py
@@ -0,0 +1,487 @@
+"""Common utility code that depends on CommonConfig."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import atexit
+import contextlib
+import os
+import shutil
+import sys
+import tempfile
+import textwrap
+
+from . import types as t
+
+from .encoding import (
+ to_bytes,
+)
+
+from .util import (
+ common_environment,
+ COVERAGE_CONFIG_NAME,
+ display,
+ find_python,
+ remove_tree,
+ MODE_DIRECTORY,
+ MODE_FILE_EXECUTE,
+ PYTHON_PATHS,
+ raw_command,
+ read_lines_without_comments,
+ ANSIBLE_TEST_DATA_ROOT,
+ ApplicationError,
+)
+
+from .io import (
+ write_text_file,
+ write_json_file,
+)
+
+from .data import (
+ data_context,
+)
+
+from .provider.layout import (
+ LayoutMessages,
+)
+
+DOCKER_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
+REMOTE_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
+NETWORK_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
+
+
+class ResultType:
+ """Test result type."""
+ BOT = None # type: ResultType
+ COVERAGE = None # type: ResultType
+ DATA = None # type: ResultType
+ JUNIT = None # type: ResultType
+ LOGS = None # type: ResultType
+ REPORTS = None # type: ResultType
+ TMP = None # type: ResultType
+
+ @staticmethod
+ def _populate():
+ ResultType.BOT = ResultType('bot')
+ ResultType.COVERAGE = ResultType('coverage')
+ ResultType.DATA = ResultType('data')
+ ResultType.JUNIT = ResultType('junit')
+ ResultType.LOGS = ResultType('logs')
+ ResultType.REPORTS = ResultType('reports')
+ ResultType.TMP = ResultType('.tmp')
+
+ def __init__(self, name): # type: (str) -> None
+ self.name = name
+
+ @property
+ def relative_path(self): # type: () -> str
+ """The content relative path to the results."""
+ return os.path.join(data_context().content.results_path, self.name)
+
+ @property
+ def path(self): # type: () -> str
+ """The absolute path to the results."""
+ return os.path.join(data_context().content.root, self.relative_path)
+
+ def __str__(self): # type: () -> str
+ return self.name
+
+
+# noinspection PyProtectedMember
+ResultType._populate() # pylint: disable=protected-access
+
+
+class CommonConfig:
+ """Configuration common to all commands."""
+ def __init__(self, args, command):
+ """
+ :type args: any
+ :type command: str
+ """
+ self.command = command
+
+ self.color = args.color # type: bool
+ self.explain = args.explain # type: bool
+ self.verbosity = args.verbosity # type: int
+ self.debug = args.debug # type: bool
+ self.truncate = args.truncate # type: int
+ self.redact = args.redact # type: bool
+
+ self.info_stderr = False # type: bool
+
+ self.cache = {}
+
+ def get_ansible_config(self): # type: () -> str
+ """Return the path to the Ansible config for the given config."""
+ return os.path.join(ANSIBLE_TEST_DATA_ROOT, 'ansible.cfg')
+
+
+class NetworkPlatformSettings:
+ """Settings required for provisioning a network platform."""
+    def __init__(self, collection, inventory_vars): # type: (str, t.Dict[str, str]) -> None
+ self.collection = collection
+ self.inventory_vars = inventory_vars
+
+
+def get_docker_completion():
+ """
+ :rtype: dict[str, dict[str, str]]
+ """
+ return get_parameterized_completion(DOCKER_COMPLETION, 'docker')
+
+
+def get_remote_completion():
+ """
+ :rtype: dict[str, dict[str, str]]
+ """
+ return get_parameterized_completion(REMOTE_COMPLETION, 'remote')
+
+
+def get_network_completion():
+ """
+ :rtype: dict[str, dict[str, str]]
+ """
+ return get_parameterized_completion(NETWORK_COMPLETION, 'network')
+
+
+def get_parameterized_completion(cache, name):
+ """
+ :type cache: dict[str, dict[str, str]]
+ :type name: str
+ :rtype: dict[str, dict[str, str]]
+ """
+ if not cache:
+ if data_context().content.collection:
+ context = 'collection'
+ else:
+ context = 'ansible-base'
+
+ images = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', '%s.txt' % name), remove_blank_lines=True)
+
+ cache.update(dict(kvp for kvp in [parse_parameterized_completion(i) for i in images] if kvp and kvp[1].get('context', context) == context))
+
+ return cache
+
+
+def parse_parameterized_completion(value): # type: (str) -> t.Optional[t.Tuple[str, t.Dict[str, str]]]
+ """Parse the given completion entry, returning the entry name and a dictionary of key/value settings."""
+ values = value.split()
+
+ if not values:
+ return None
+
+ name = values[0]
+ data = dict((kvp[0], kvp[1] if len(kvp) > 1 else '') for kvp in [item.split('=', 1) for item in values[1:]])
+
+ return name, data
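+
+
+# Example (illustrative; the entry below is hypothetical):
+# parse_parameterized_completion('centos8 python=3.6 context=ansible-base')
+# returns ('centos8', {'python': '3.6', 'context': 'ansible-base'}).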
+
+
+def docker_qualify_image(name):
+ """
+ :type name: str
+ :rtype: str
+ """
+ config = get_docker_completion().get(name, {})
+
+ return config.get('name', name)
+
+
+def get_network_settings(args, platform, version): # type: (NetworkIntegrationConfig, str, str) -> NetworkPlatformSettings
+ """Returns settings for the given network platform and version."""
+ platform_version = '%s/%s' % (platform, version)
+ completion = get_network_completion().get(platform_version, {})
+ collection = args.platform_collection.get(platform, completion.get('collection'))
+
+ settings = NetworkPlatformSettings(
+ collection,
+ dict(
+ ansible_connection=args.platform_connection.get(platform, completion.get('connection')),
+ ansible_network_os='%s.%s' % (collection, platform) if collection else platform,
+ )
+ )
+
+ return settings
+
+
+def handle_layout_messages(messages): # type: (t.Optional[LayoutMessages]) -> None
+ """Display the given layout messages."""
+ if not messages:
+ return
+
+ for message in messages.info:
+ display.info(message, verbosity=1)
+
+ for message in messages.warning:
+ display.warning(message)
+
+ if messages.error:
+ raise ApplicationError('\n'.join(messages.error))
+
+
+@contextlib.contextmanager
+def named_temporary_file(args, prefix, suffix, directory, content):
+ """
+    :type args: CommonConfig
+    :type prefix: str
+    :type suffix: str
+    :type directory: str
+    :type content: str | bytes | unicode
+ :rtype: str
+ """
+ if args.explain:
+ yield os.path.join(directory, '%stemp%s' % (prefix, suffix))
+ else:
+ with tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix, dir=directory) as tempfile_fd:
+ tempfile_fd.write(to_bytes(content))
+ tempfile_fd.flush()
+
+ yield tempfile_fd.name
+
+
+def write_json_test_results(category, # type: ResultType
+ name, # type: str
+ content, # type: t.Union[t.List[t.Any], t.Dict[str, t.Any]]
+ formatted=True, # type: bool
+ encoder=None, # type: t.Optional[t.Callable[[t.Any], t.Any]]
+ ): # type: (...) -> None
+ """Write the given json content to the specified test results path, creating directories as needed."""
+ path = os.path.join(category.path, name)
+ write_json_file(path, content, create_directories=True, formatted=formatted, encoder=encoder)
+
+
+def write_text_test_results(category, name, content): # type: (ResultType, str, str) -> None
+ """Write the given text content to the specified test results path, creating directories as needed."""
+ path = os.path.join(category.path, name)
+ write_text_file(path, content, create_directories=True)
+
+
+def get_python_path(args, interpreter):
+ """
+ :type args: TestConfig
+ :type interpreter: str
+ :rtype: str
+ """
+ python_path = PYTHON_PATHS.get(interpreter)
+
+ if python_path:
+ return python_path
+
+ prefix = 'python-'
+ suffix = '-ansible'
+
+ root_temp_dir = '/tmp'
+
+ if args.explain:
+ return os.path.join(root_temp_dir, ''.join((prefix, 'temp', suffix)))
+
+ python_path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_temp_dir)
+ injected_interpreter = os.path.join(python_path, 'python')
+
+ # A symlink is faster than the execv wrapper, but isn't compatible with virtual environments.
+ # Attempt to detect when it is safe to use a symlink by checking the real path of the interpreter.
+ use_symlink = os.path.dirname(os.path.realpath(interpreter)) == os.path.dirname(interpreter)
+
+ if use_symlink:
+ display.info('Injecting "%s" as a symlink to the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)
+
+ os.symlink(interpreter, injected_interpreter)
+ else:
+        display.info('Injecting "%s" as an execv wrapper for the "%s" interpreter.' % (injected_interpreter, interpreter), verbosity=1)
+
+ create_interpreter_wrapper(interpreter, injected_interpreter)
+
+ os.chmod(python_path, MODE_DIRECTORY)
+
+ if not PYTHON_PATHS:
+ atexit.register(cleanup_python_paths)
+
+ PYTHON_PATHS[interpreter] = python_path
+
+ return python_path
+
+
+def create_temp_dir(prefix=None, suffix=None, base_dir=None): # type: (t.Optional[str], t.Optional[str], t.Optional[str]) -> str
+ """Create a temporary directory that persists until the current process exits."""
+ temp_path = tempfile.mkdtemp(prefix=prefix or 'tmp', suffix=suffix or '', dir=base_dir)
+ atexit.register(remove_tree, temp_path)
+ return temp_path
+
+
+def create_interpreter_wrapper(interpreter, injected_interpreter): # type: (str, str) -> None
+ """Create a wrapper for the given Python interpreter at the specified path."""
+ # sys.executable is used for the shebang to guarantee it is a binary instead of a script
+ # injected_interpreter could be a script from the system or our own wrapper created for the --venv option
+ shebang_interpreter = sys.executable
+
+ code = textwrap.dedent('''
+ #!%s
+
+ from __future__ import absolute_import
+
+ from os import execv
+ from sys import argv
+
+ python = '%s'
+
+ execv(python, [python] + argv[1:])
+ ''' % (shebang_interpreter, interpreter)).lstrip()
+
+ write_text_file(injected_interpreter, code)
+
+ os.chmod(injected_interpreter, MODE_FILE_EXECUTE)
+
+
+def cleanup_python_paths():
+ """Clean up all temporary python directories."""
+ for path in sorted(PYTHON_PATHS.values()):
+ display.info('Cleaning up temporary python directory: %s' % path, verbosity=2)
+ shutil.rmtree(path)
+
+
+def get_coverage_environment(args, target_name, version, temp_path, module_coverage, remote_temp_path=None):
+ """
+ :type args: TestConfig
+ :type target_name: str
+ :type version: str
+ :type temp_path: str
+ :type module_coverage: bool
+ :type remote_temp_path: str | None
+ :rtype: dict[str, str]
+ """
+ if temp_path:
+ # integration tests (both localhost and the optional testhost)
+ # config and results are in a temporary directory
+ coverage_config_base_path = temp_path
+ coverage_output_base_path = temp_path
+ elif args.coverage_config_base_path:
+ # unit tests, sanity tests and other special cases (localhost only)
+ # config is in a temporary directory
+ # results are in the source tree
+ coverage_config_base_path = args.coverage_config_base_path
+ coverage_output_base_path = os.path.join(data_context().content.root, data_context().content.results_path)
+ else:
+ raise Exception('No temp path and no coverage config base path. Check for missing coverage_context usage.')
+
+ config_file = os.path.join(coverage_config_base_path, COVERAGE_CONFIG_NAME)
+ coverage_file = os.path.join(coverage_output_base_path, ResultType.COVERAGE.name, '%s=%s=%s=%s=coverage' % (
+ args.command, target_name, args.coverage_label or 'local-%s' % version, 'python-%s' % version))
+
+ if not args.explain and not os.path.exists(config_file):
+ raise Exception('Missing coverage config file: %s' % config_file)
+
+ if args.coverage_check:
+ # cause the 'coverage' module to be found, but not imported or enabled
+ coverage_file = ''
+
+ # Enable code coverage collection on local Python programs (this does not include Ansible modules).
+ # Used by the injectors to support code coverage.
+ # Used by the pytest unit test plugin to support code coverage.
+ # The COVERAGE_FILE variable is also used directly by the 'coverage' module.
+ env = dict(
+ COVERAGE_CONF=config_file,
+ COVERAGE_FILE=coverage_file,
+ )
+
+ if module_coverage:
+ # Enable code coverage collection on Ansible modules (both local and remote).
+ # Used by the AnsiballZ wrapper generator in lib/ansible/executor/module_common.py to support code coverage.
+ env.update(dict(
+ _ANSIBLE_COVERAGE_CONFIG=config_file,
+ _ANSIBLE_COVERAGE_OUTPUT=coverage_file,
+ ))
+
+ if remote_temp_path:
+ # Include the command, target and label so the remote host can create a filename with that info. The remote
+ # is responsible for adding '={language version}=coverage.{hostname}.{pid}.{id}'
+ env['_ANSIBLE_COVERAGE_REMOTE_OUTPUT'] = os.path.join(remote_temp_path, '%s=%s=%s' % (
+ args.command, target_name, args.coverage_label or 'remote'))
+ env['_ANSIBLE_COVERAGE_REMOTE_WHITELIST'] = os.path.join(data_context().content.root, '*')
+
+ return env
+
+
+def intercept_command(args, cmd, target_name, env, capture=False, data=None, cwd=None, python_version=None, temp_path=None, module_coverage=True,
+ virtualenv=None, disable_coverage=False, remote_temp_path=None):
+ """
+ :type args: TestConfig
+ :type cmd: collections.Iterable[str]
+ :type target_name: str
+ :type env: dict[str, str]
+ :type capture: bool
+ :type data: str | None
+ :type cwd: str | None
+ :type python_version: str | None
+ :type temp_path: str | None
+ :type module_coverage: bool
+ :type virtualenv: str | None
+ :type disable_coverage: bool
+ :type remote_temp_path: str | None
+ :rtype: str | None, str | None
+ """
+ if not env:
+ env = common_environment()
+ else:
+ env = env.copy()
+
+ cmd = list(cmd)
+ version = python_version or args.python_version
+ interpreter = virtualenv or find_python(version)
+ inject_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'injector')
+
+ if not virtualenv:
+ # injection of python into the path is required when not activating a virtualenv
+ # otherwise scripts may find the wrong interpreter or possibly no interpreter
+ python_path = get_python_path(args, interpreter)
+ inject_path = python_path + os.path.pathsep + inject_path
+
+ env['PATH'] = inject_path + os.path.pathsep + env['PATH']
+ env['ANSIBLE_TEST_PYTHON_VERSION'] = version
+ env['ANSIBLE_TEST_PYTHON_INTERPRETER'] = interpreter
+
+ if args.coverage and not disable_coverage:
+ # add the necessary environment variables to enable code coverage collection
+ env.update(get_coverage_environment(args, target_name, version, temp_path, module_coverage,
+ remote_temp_path=remote_temp_path))
+
+ return run_command(args, cmd, capture=capture, env=env, data=data, cwd=cwd)
+
+
+def resolve_csharp_ps_util(import_name, path):
+ """
+ :type import_name: str
+ :type path: str
+ """
+ if data_context().content.is_ansible or not import_name.startswith('.'):
+ # We don't support relative paths for builtin utils, there's no point.
+ return import_name
+
+ packages = import_name.split('.')
+ module_packages = path.split(os.path.sep)
+
+ for package in packages:
+ if not module_packages or package:
+ break
+ del module_packages[-1]
+
+ return 'ansible_collections.%s%s' % (data_context().content.prefix,
+ '.'.join(module_packages + [p for p in packages if p]))
+
+
+def run_command(args, cmd, capture=False, env=None, data=None, cwd=None, always=False, stdin=None, stdout=None,
+ cmd_verbosity=1, str_errors='strict'):
+ """
+ :type args: CommonConfig
+ :type cmd: collections.Iterable[str]
+ :type capture: bool
+ :type env: dict[str, str] | None
+ :type data: str | None
+ :type cwd: str | None
+ :type always: bool
+ :type stdin: file | None
+ :type stdout: file | None
+ :type cmd_verbosity: int
+ :type str_errors: str
+ :rtype: str | None, str | None
+ """
+ explain = args.explain and not always
+ return raw_command(cmd, capture=capture, env=env, data=data, cwd=cwd, explain=explain, stdin=stdin, stdout=stdout,
+ cmd_verbosity=cmd_verbosity, str_errors=str_errors)
diff --git a/test/lib/ansible_test/_internal/venv.py b/test/lib/ansible_test/_internal/venv.py
new file mode 100644
index 00000000..37eef367
--- /dev/null
+++ b/test/lib/ansible_test/_internal/venv.py
@@ -0,0 +1,227 @@
+"""Virtual environment management."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import sys
+
+from . import types as t
+
+from .config import (
+ EnvironmentConfig,
+)
+
+from .util import (
+ find_python,
+ SubprocessError,
+ get_available_python_versions,
+ SUPPORTED_PYTHON_VERSIONS,
+ ANSIBLE_TEST_DATA_ROOT,
+ display,
+ remove_tree,
+)
+
+from .util_common import (
+ run_command,
+)
+
+
+def create_virtual_environment(args, # type: EnvironmentConfig
+ version, # type: str
+ path, # type: str
+ system_site_packages=False, # type: bool
+ pip=True, # type: bool
+ ): # type: (...) -> bool
+ """Create a virtual environment using venv or virtualenv for the requested Python version."""
+ if os.path.isdir(path):
+ display.info('Using existing Python %s virtual environment: %s' % (version, path), verbosity=1)
+ return True
+
+ python = find_python(version, required=False)
+ python_version = tuple(int(v) for v in version.split('.'))
+
+ if not python:
+ # the requested python version could not be found
+ return False
+
+ if python_version >= (3, 0):
+ # use the built-in 'venv' module on Python 3.x
+ # creating a virtual environment using 'venv' when running in a virtual environment created by 'virtualenv' results
+        # in a copy of the original virtual environment instead of a new one
+ # avoid this issue by only using "real" python interpreters to invoke 'venv'
+ for real_python in iterate_real_pythons(args, version):
+ if run_venv(args, real_python, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "venv": %s' % (version, path), verbosity=1)
+ return True
+
+        # something went wrong, most likely the package maintainer for the Python installation removed ensurepip,
+        # which prevents creating a virtual environment without installing additional OS packages
+
+    # use the installed 'virtualenv' module on the requested Python version
+ if run_virtualenv(args, python, python, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "virtualenv": %s' % (version, path), verbosity=1)
+ return True
+
+ available_pythons = get_available_python_versions(SUPPORTED_PYTHON_VERSIONS)
+
+ for available_python_version, available_python_interpreter in sorted(available_pythons.items()):
+ virtualenv_version = get_virtualenv_version(args, available_python_interpreter)
+
+ if not virtualenv_version:
+ # virtualenv not available for this Python or we were unable to detect the version
+ continue
+
+ if python_version == (2, 6) and virtualenv_version >= (16, 0, 0):
+ # virtualenv 16.0.0 dropped python 2.6 support: https://virtualenv.pypa.io/en/latest/changes/#v16-0-0-2018-05-16
+ continue
+
+        # try using 'virtualenv' from another Python to set up the desired version
+ if run_virtualenv(args, available_python_interpreter, python, system_site_packages, pip, path):
+ display.info('Created Python %s virtual environment using "virtualenv" on Python %s: %s' % (version, available_python_version, path), verbosity=1)
+ return True
+
+ # no suitable 'virtualenv' available
+ return False
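+
+# For reference, the fallback order tried above: 'venv' invoked from a real
+# (non-virtualenv) interpreter of the requested version, then 'virtualenv' run by
+# the requested interpreter, then 'virtualenv' from any other available interpreter
+# that still supports the requested version.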
+
+
+def iterate_real_pythons(args, version): # type: (EnvironmentConfig, str) -> t.Iterable[str]
+ """
+ Iterate through available real python interpreters of the requested version.
+ The current interpreter will be checked and then the path will be searched.
+ """
+ version_info = tuple(int(n) for n in version.split('.'))
+ current_python = None
+
+ if version_info == sys.version_info[:len(version_info)]:
+ current_python = sys.executable
+ real_prefix = get_python_real_prefix(args, current_python)
+
+ if real_prefix:
+ current_python = find_python(version, os.path.join(real_prefix, 'bin'))
+
+ if current_python:
+ yield current_python
+
+ path = os.environ.get('PATH', os.path.defpath)
+
+ if not path:
+ return
+
+ found_python = find_python(version, path)
+
+ if not found_python:
+ return
+
+ if found_python == current_python:
+ return
+
+ real_prefix = get_python_real_prefix(args, found_python)
+
+ if real_prefix:
+ found_python = find_python(version, os.path.join(real_prefix, 'bin'))
+
+ if found_python:
+ yield found_python
+
+
+def get_python_real_prefix(args, path): # type: (EnvironmentConfig, str) -> t.Optional[str]
+ """
+ Return the real prefix of the specified interpreter or None if the interpreter is not a virtual environment created by 'virtualenv'.
+ """
+    cmd = [path, os.path.join(ANSIBLE_TEST_DATA_ROOT, 'virtualenvcheck.py')]
+ check_result = json.loads(run_command(args, cmd, capture=True, always=True)[0])
+ real_prefix = check_result['real_prefix']
+ return real_prefix
+
+
+def run_venv(args, # type: EnvironmentConfig
+ run_python, # type: str
+ system_site_packages, # type: bool
+ pip, # type: bool
+ path, # type: str
+ ): # type: (...) -> bool
+ """Create a virtual environment using the 'venv' module. Not available on Python 2.x."""
+ cmd = [run_python, '-m', 'venv']
+
+ if system_site_packages:
+ cmd.append('--system-site-packages')
+
+ if not pip:
+ cmd.append('--without-pip')
+
+ cmd.append(path)
+
+ try:
+ run_command(args, cmd, capture=True)
+ except SubprocessError as ex:
+ remove_tree(path)
+
+ if args.verbosity > 1:
+ display.error(ex)
+
+ return False
+
+ return True
+
+
+def run_virtualenv(args, # type: EnvironmentConfig
+ run_python, # type: str
+ env_python, # type: str
+ system_site_packages, # type: bool
+ pip, # type: bool
+ path, # type: str
+ ): # type: (...) -> bool
+ """Create a virtual environment using the 'virtualenv' module."""
+ # always specify --python to guarantee the desired interpreter is provided
+ # otherwise virtualenv may select a different interpreter than the one running virtualenv
+ cmd = [run_python, '-m', 'virtualenv', '--python', env_python]
+
+ if system_site_packages:
+ cmd.append('--system-site-packages')
+
+ if not pip:
+ cmd.append('--no-pip')
+
+ cmd.append(path)
+
+ try:
+ run_command(args, cmd, capture=True)
+ except SubprocessError as ex:
+ remove_tree(path)
+
+ if args.verbosity > 1:
+ display.error(ex)
+
+ return False
+
+ return True
+
+
+def get_virtualenv_version(args, python): # type: (EnvironmentConfig, str) -> t.Optional[t.Tuple[int, ...]]
+ """Get the virtualenv version for the given python intepreter, if available."""
+ try:
+ return get_virtualenv_version.result
+ except AttributeError:
+ pass
+
+ get_virtualenv_version.result = None
+
+ cmd = [python, '-m', 'virtualenv', '--version']
+
+ try:
+ stdout = run_command(args, cmd, capture=True)[0]
+ except SubprocessError as ex:
+ if args.verbosity > 1:
+ display.error(ex)
+
+ stdout = ''
+
+ if stdout:
+ # noinspection PyBroadException
+ try:
+ get_virtualenv_version.result = tuple(int(v) for v in stdout.strip().split('.'))
+ except Exception: # pylint: disable=broad-except
+ pass
+
+ return get_virtualenv_version.result
diff --git a/test/lib/ansible_test/config/cloud-config-aws.ini.template b/test/lib/ansible_test/config/cloud-config-aws.ini.template
new file mode 100644
index 00000000..88b9fea6
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-aws.ini.template
@@ -0,0 +1,26 @@
+# This is the configuration template for ansible-test AWS integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned AWS credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary AWS credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of AWS credentials requires an ansible-core-ci API key.
+
+[default]
+aws_access_key: @ACCESS_KEY
+aws_secret_key: @SECRET_KEY
+security_token: @SECURITY_TOKEN
+aws_region: @REGION
+# aws_cleanup controls whether the environment is cleaned up after tests have completed
+# This only applies to tests that have a cleanup stage
+# Defaults to true when using this template
+# aws_cleanup: true
+# aliases for backwards compatibility with older integration test playbooks
+ec2_access_key: {{ aws_access_key }}
+ec2_secret_key: {{ aws_secret_key }}
+ec2_region: {{ aws_region }}
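+
+# Example of a completed file (saved without the .template extension), using
+# the well-known AWS documentation sample credentials as made-up values
+# (security_token can stay empty when not using temporary credentials):
+#
+#   [default]
+#   aws_access_key: AKIAIOSFODNN7EXAMPLE
+#   aws_secret_key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+#   aws_region: us-east-1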
diff --git a/test/lib/ansible_test/config/cloud-config-azure.ini.template b/test/lib/ansible_test/config/cloud-config-azure.ini.template
new file mode 100644
index 00000000..ac5266ba
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-azure.ini.template
@@ -0,0 +1,32 @@
+# This is the configuration template for ansible-test Azure integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned Azure credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary Azure credentials,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of Azure credentials requires one of:
+# 1) ansible-core-ci API key in ~/.ansible-core-ci.key
+# 2) Sherlock URL (including API key) in ~/.ansible-sherlock-ci.cfg
+
+[default]
+# Provide either Service Principal or Active Directory credentials below.
+
+# Service Principal
+AZURE_CLIENT_ID:
+AZURE_SECRET:
+AZURE_SUBSCRIPTION_ID:
+AZURE_TENANT:
+
+# Active Directory
+AZURE_AD_USER:
+AZURE_PASSWORD:
+AZURE_SUBSCRIPTION_ID:
+
+# Resource Groups
+RESOURCE_GROUP:
+RESOURCE_GROUP_SECONDARY:
diff --git a/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template b/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template
new file mode 100644
index 00000000..1c99e9b8
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-cloudscale.ini.template
@@ -0,0 +1,9 @@
+# This is the configuration template for ansible-test cloudscale integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+#
+
+[default]
+cloudscale_api_token = @API_TOKEN
diff --git a/test/lib/ansible_test/config/cloud-config-cs.ini.template b/test/lib/ansible_test/config/cloud-config-cs.ini.template
new file mode 100644
index 00000000..f8d8a915
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-cs.ini.template
@@ -0,0 +1,18 @@
+# This is the configuration template for ansible-test CloudStack integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned cloudstack-sim docker container in ansible-test.
+#
+# If you do not want to use the automatically provided CloudStack simulator,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration and not launch the simulator.
+#
+# It is recommended that you DO NOT use this template unless you cannot use the simulator.
+
+[default]
+endpoint = http://@HOST:@PORT/client/api
+key = @KEY
+secret = @SECRET
+timeout = 60
diff --git a/test/lib/ansible_test/config/cloud-config-gcp.ini.template b/test/lib/ansible_test/config/cloud-config-gcp.ini.template
new file mode 100644
index 00000000..00a20971
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-gcp.ini.template
@@ -0,0 +1,18 @@
+# This is the configuration template for ansible-test GCP integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned GCP simulator docker container in ansible-test.
+#
+# If you do not want to use the automatically provided GCP simulator,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration and not launch the simulator.
+#
+# It is recommended that you DO NOT use this template unless you cannot use the simulator.
+
+[default]
+gcp_project: @PROJECT
+gcp_cred_file: @CRED_FILE
+gcp_cred_kind: @CRED_KIND
+gcp_cred_email: @CRED_EMAIL
diff --git a/test/lib/ansible_test/config/cloud-config-hcloud.ini.template b/test/lib/ansible_test/config/cloud-config-hcloud.ini.template
new file mode 100644
index 00000000..8db658db
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-hcloud.ini.template
@@ -0,0 +1,15 @@
+# This is the configuration template for ansible-test Hetzner Cloud integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned Hetzner Cloud credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary Hetzner Cloud credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of Hetzner Cloud credentials requires an ansible-core-ci API key.
+
+[default]
+hcloud_api_token = @TOKEN
diff --git a/test/lib/ansible_test/config/cloud-config-opennebula.ini.template b/test/lib/ansible_test/config/cloud-config-opennebula.ini.template
new file mode 100644
index 00000000..00c56db1
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-opennebula.ini.template
@@ -0,0 +1,20 @@
+# This is the configuration template for ansible-test OpenNebula integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Running integration tests against previously recorded XMLRPC fixtures
+#
+# If you want to test against a Live OpenNebula platform,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration.
+#
+# If you run with @FIXTURES enabled (true), you can choose between @REPLAY mode
+# (true) and record mode (false).
+
+[default]
+opennebula_url: @URL
+opennebula_username: @USERNAME
+opennebula_password: @PASSWORD
+opennebula_test_fixture: @FIXTURES
+opennebula_test_fixture_replay: @REPLAY
\ No newline at end of file
diff --git a/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template b/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template
new file mode 100644
index 00000000..0a10f23b
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-openshift.kubeconfig.template
@@ -0,0 +1,12 @@
+# This is the configuration template for ansible-test OpenShift integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned openshift-origin docker container in ansible-test.
+#
+# If you do not want to use the automatically provided OpenShift container,
+# place your kubeconfig file next to this file, with the same name, but without the .template extension.
+# This will cause ansible-test to use the given configuration and not launch the automatically provided container.
+#
+# It is recommended that you DO NOT use this template unless you cannot use the automatically provided container.
diff --git a/test/lib/ansible_test/config/cloud-config-scaleway.ini.template b/test/lib/ansible_test/config/cloud-config-scaleway.ini.template
new file mode 100644
index 00000000..f10419e0
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-scaleway.ini.template
@@ -0,0 +1,13 @@
+# This is the configuration template for ansible-test Scaleway integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+#
+# If you want to test against the Scaleway public API,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration.
+
+[default]
+key = @KEY
+org = @ORG
diff --git a/test/lib/ansible_test/config/cloud-config-tower.ini.template b/test/lib/ansible_test/config/cloud-config-tower.ini.template
new file mode 100644
index 00000000..c76740ab
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-tower.ini.template
@@ -0,0 +1,18 @@
+# This is the configuration template for ansible-test Tower integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned Tower credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary Tower credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of Tower credentials requires an ansible-core-ci API key.
+
+[default]
+version=@VERSION
+host=@HOST
+username=@USERNAME
+password=@PASSWORD
diff --git a/test/lib/ansible_test/config/cloud-config-vcenter.ini.template b/test/lib/ansible_test/config/cloud-config-vcenter.ini.template
new file mode 100644
index 00000000..eff8bf74
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-vcenter.ini.template
@@ -0,0 +1,26 @@
+# This is the configuration template for ansible-test VMware integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the automatically provisioned VMware credentials in ansible-test.
+#
+# If you do not want to use the automatically provisioned temporary VMware credentials,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration instead of temporary credentials.
+#
+# NOTE: Automatic provisioning of VMware credentials requires an ansible-core-ci API key.
+
+[DEFAULT]
+vcenter_username: @VMWARE_USERNAME
+vcenter_password: @VMWARE_PASSWORD
+vcenter_hostname: @VMWARE_HOSTNAME
+vmware_validate_certs: @VMWARE_VALIDATE_CERTS
+esxi1_username: @ESXI1_USERNAME
+esxi1_hostname: @ESXI1_HOSTNAME
+esxi1_password: @ESXI1_PASSWORD
+esxi2_username: @ESXI2_USERNAME
+esxi2_hostname: @ESXI2_HOSTNAME
+esxi2_password: @ESXI2_PASSWORD
+vmware_proxy_host: @VMWARE_PROXY_HOST
+vmware_proxy_port: @VMWARE_PROXY_PORT
diff --git a/test/lib/ansible_test/config/cloud-config-vultr.ini.template b/test/lib/ansible_test/config/cloud-config-vultr.ini.template
new file mode 100644
index 00000000..48b82108
--- /dev/null
+++ b/test/lib/ansible_test/config/cloud-config-vultr.ini.template
@@ -0,0 +1,12 @@
+# This is the configuration template for ansible-test Vultr integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+#
+# If you want to test against the Vultr public API,
+# fill in the values below and save this file without the .template extension.
+# This will cause ansible-test to use the given configuration.
+
+[default]
+key = @KEY
diff --git a/test/lib/ansible_test/config/inventory.networking.template b/test/lib/ansible_test/config/inventory.networking.template
new file mode 100644
index 00000000..a1545684
--- /dev/null
+++ b/test/lib/ansible_test/config/inventory.networking.template
@@ -0,0 +1,42 @@
+# This is the configuration template for ansible-test network-integration tests.
+#
+# You do not need this template if you are:
+#
+# 1) Running integration tests without using ansible-test.
+# 2) Using the `--platform` option to provision temporary network instances on EC2.
+#
+# If you do not want to use the automatically provisioned temporary network instances,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+#
+# NOTE: Automatic provisioning of network instances on EC2 requires an ansible-core-ci API key.
+
+[@OS]
+@NAME ansible_connection="local" ansible_host=@HOST ansible_network_os="@OS" ansible_user="@USER" ansible_ssh_private_key_file="@KEY_FILE"
+
+[aci:vars]
+aci_hostname=your-apic-1
+aci_username=admin
+aci_password=your-apic-password
+aci_validate_certs=no
+aci_use_ssl=yes
+aci_use_proxy=no
+
+[aci]
+localhost ansible_ssh_host=127.0.0.1 ansible_connection=local
+
+[mso:vars]
+mso_hostname=your-mso-1
+mso_username=admin
+mso_password=your-mso-password
+mso_validate_certs=no
+mso_use_ssl=yes
+mso_use_proxy=no
+
+[mso]
+localhost ansible_ssh_host=127.0.0.1 ansible_connection=local
+
+###
+# Example
+#
+# [vyos]
+# vyos01.example.net ansible_connection=local ansible_network_os="vyos" ansible_user=admin ansible_ssh_pass=mypassword
diff --git a/test/lib/ansible_test/config/inventory.winrm.template b/test/lib/ansible_test/config/inventory.winrm.template
new file mode 100644
index 00000000..34bbee2d
--- /dev/null
+++ b/test/lib/ansible_test/config/inventory.winrm.template
@@ -0,0 +1,28 @@
+# This is the configuration template for ansible-test windows-integration tests.
+# It can also be used with the legacy `make`-based method of running tests.
+#
+# You do not need this template if you are:
+#
+# 1) Using the `--windows` option to provision temporary Windows instances on EC2.
+#
+# If you do not want to use the automatically provisioned temporary Windows instances,
+# fill in the @VAR placeholders below and save this file without the .template extension.
+#
+# NOTE: Automatic provisioning of Windows instances on EC2 requires an ansible-core-ci API key.
+#
+# REMINDER: Standard ports for winrm are 5985 (HTTP) and 5986 (HTTPS).
+
+[windows]
+@NAME ansible_host=@HOST ansible_user=@USER ansible_password=@PASSWORD ansible_port=@PORT
+
+[windows:vars]
+ansible_connection=winrm
+ansible_winrm_server_cert_validation=ignore
+
+# support winrm connection tests (temporary solution, does not support testing enable/disable of pipelining)
+[winrm:children]
+windows
+
+# support tests that target testhost
+[testhost:children]
+windows
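+
+###
+# Example (made-up values)
+#
+# [windows]
+# win2019.example.net ansible_host=10.0.0.5 ansible_user=Administrator ansible_password=ExamplePassword123! ansible_port=5986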